1 diff -urNp linux-2.6.32.42/arch/alpha/include/asm/elf.h linux-2.6.32.42/arch/alpha/include/asm/elf.h
2 --- linux-2.6.32.42/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3 +++ linux-2.6.32.42/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-2.6.32.42/arch/alpha/include/asm/pgtable.h linux-2.6.32.42/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.32.42/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20 +++ linux-2.6.32.42/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.32.42/arch/alpha/kernel/module.c linux-2.6.32.42/arch/alpha/kernel/module.c
40 --- linux-2.6.32.42/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41 +++ linux-2.6.32.42/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.32.42/arch/alpha/kernel/osf_sys.c linux-2.6.32.42/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.32.42/arch/alpha/kernel/osf_sys.c 2011-03-27 14:31:47.000000000 -0400
53 +++ linux-2.6.32.42/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54 @@ -431,7 +431,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char
55 return -EFAULT;
56
57 len = namelen;
58 - if (namelen > 32)
59 + if (len > 32)
60 len = 32;
61
62 down_read(&uts_sem);
63 @@ -618,7 +618,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, comman
64 down_read(&uts_sem);
65 res = sysinfo_table[offset];
66 len = strlen(res)+1;
67 - if (len > count)
68 + if ((unsigned long)len > (unsigned long)count)
69 len = count;
70 if (copy_to_user(buf, res, len))
71 err = -EFAULT;
72 @@ -673,7 +673,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned
73 return 1;
74
75 case GSI_GET_HWRPB:
76 - if (nbytes < sizeof(*hwrpb))
77 + if (nbytes > sizeof(*hwrpb))
78 return -EINVAL;
79 if (copy_to_user(buffer, hwrpb, nbytes) != 0)
80 return -EFAULT;
81 @@ -1035,6 +1035,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
82 {
83 struct rusage r;
84 long ret, err;
85 + unsigned int status = 0;
86 mm_segment_t old_fs;
87
88 if (!ur)
89 @@ -1043,13 +1044,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
90 old_fs = get_fs();
91
92 set_fs (KERNEL_DS);
93 - ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
94 + ret = sys_wait4(pid, (unsigned int __user *) &status, options,
95 + (struct rusage __user *) &r);
96 set_fs (old_fs);
97
98 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
99 return -EFAULT;
100
101 err = 0;
102 + err |= put_user(status, ustatus);
103 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
104 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
105 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
106 @@ -1169,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
107 /* At this point: (!vma || addr < vma->vm_end). */
108 if (limit - len < addr)
109 return -ENOMEM;
110 - if (!vma || addr + len <= vma->vm_start)
111 + if (check_heap_stack_gap(vma, addr, len))
112 return addr;
113 addr = vma->vm_end;
114 vma = vma->vm_next;
115 @@ -1205,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
116 merely specific addresses, but regions of memory -- perhaps
117 this feature should be incorporated into all ports? */
118
119 +#ifdef CONFIG_PAX_RANDMMAP
120 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
121 +#endif
122 +
123 if (addr) {
124 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
125 if (addr != (unsigned long) -ENOMEM)
126 @@ -1212,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
127 }
128
129 /* Next, try allocating at TASK_UNMAPPED_BASE. */
130 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
131 - len, limit);
132 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
133 +
134 if (addr != (unsigned long) -ENOMEM)
135 return addr;
136
137 diff -urNp linux-2.6.32.42/arch/alpha/mm/fault.c linux-2.6.32.42/arch/alpha/mm/fault.c
138 --- linux-2.6.32.42/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
139 +++ linux-2.6.32.42/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
140 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
141 __reload_thread(pcb);
142 }
143
144 +#ifdef CONFIG_PAX_PAGEEXEC
145 +/*
146 + * PaX: decide what to do with offenders (regs->pc = fault address)
147 + *
148 + * returns 1 when task should be killed
149 + * 2 when patched PLT trampoline was detected
150 + * 3 when unpatched PLT trampoline was detected
151 + */
152 +static int pax_handle_fetch_fault(struct pt_regs *regs)
153 +{
154 +
155 +#ifdef CONFIG_PAX_EMUPLT
156 + int err;
157 +
158 + do { /* PaX: patched PLT emulation #1 */
159 + unsigned int ldah, ldq, jmp;
160 +
161 + err = get_user(ldah, (unsigned int *)regs->pc);
162 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
163 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
164 +
165 + if (err)
166 + break;
167 +
168 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
169 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
170 + jmp == 0x6BFB0000U)
171 + {
172 + unsigned long r27, addr;
173 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
174 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
175 +
176 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
177 + err = get_user(r27, (unsigned long *)addr);
178 + if (err)
179 + break;
180 +
181 + regs->r27 = r27;
182 + regs->pc = r27;
183 + return 2;
184 + }
185 + } while (0);
186 +
187 + do { /* PaX: patched PLT emulation #2 */
188 + unsigned int ldah, lda, br;
189 +
190 + err = get_user(ldah, (unsigned int *)regs->pc);
191 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
192 + err |= get_user(br, (unsigned int *)(regs->pc+8));
193 +
194 + if (err)
195 + break;
196 +
197 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
198 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
199 + (br & 0xFFE00000U) == 0xC3E00000U)
200 + {
201 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
202 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
203 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
204 +
205 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
206 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
207 + return 2;
208 + }
209 + } while (0);
210 +
211 + do { /* PaX: unpatched PLT emulation */
212 + unsigned int br;
213 +
214 + err = get_user(br, (unsigned int *)regs->pc);
215 +
216 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
217 + unsigned int br2, ldq, nop, jmp;
218 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
219 +
220 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
221 + err = get_user(br2, (unsigned int *)addr);
222 + err |= get_user(ldq, (unsigned int *)(addr+4));
223 + err |= get_user(nop, (unsigned int *)(addr+8));
224 + err |= get_user(jmp, (unsigned int *)(addr+12));
225 + err |= get_user(resolver, (unsigned long *)(addr+16));
226 +
227 + if (err)
228 + break;
229 +
230 + if (br2 == 0xC3600000U &&
231 + ldq == 0xA77B000CU &&
232 + nop == 0x47FF041FU &&
233 + jmp == 0x6B7B0000U)
234 + {
235 + regs->r28 = regs->pc+4;
236 + regs->r27 = addr+16;
237 + regs->pc = resolver;
238 + return 3;
239 + }
240 + }
241 + } while (0);
242 +#endif
243 +
244 + return 1;
245 +}
246 +
247 +void pax_report_insns(void *pc, void *sp)
248 +{
249 + unsigned long i;
250 +
251 + printk(KERN_ERR "PAX: bytes at PC: ");
252 + for (i = 0; i < 5; i++) {
253 + unsigned int c;
254 + if (get_user(c, (unsigned int *)pc+i))
255 + printk(KERN_CONT "???????? ");
256 + else
257 + printk(KERN_CONT "%08x ", c);
258 + }
259 + printk("\n");
260 +}
261 +#endif
262
263 /*
264 * This routine handles page faults. It determines the address,
265 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
266 good_area:
267 si_code = SEGV_ACCERR;
268 if (cause < 0) {
269 - if (!(vma->vm_flags & VM_EXEC))
270 + if (!(vma->vm_flags & VM_EXEC)) {
271 +
272 +#ifdef CONFIG_PAX_PAGEEXEC
273 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
274 + goto bad_area;
275 +
276 + up_read(&mm->mmap_sem);
277 + switch (pax_handle_fetch_fault(regs)) {
278 +
279 +#ifdef CONFIG_PAX_EMUPLT
280 + case 2:
281 + case 3:
282 + return;
283 +#endif
284 +
285 + }
286 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
287 + do_group_exit(SIGKILL);
288 +#else
289 goto bad_area;
290 +#endif
291 +
292 + }
293 } else if (!cause) {
294 /* Allow reads even for write-only mappings */
295 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
296 diff -urNp linux-2.6.32.42/arch/arm/include/asm/elf.h linux-2.6.32.42/arch/arm/include/asm/elf.h
297 --- linux-2.6.32.42/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
298 +++ linux-2.6.32.42/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
299 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
300 the loader. We need to make sure that it is out of the way of the program
301 that it will "exec", and that there is sufficient room for the brk. */
302
303 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
304 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
305 +
306 +#ifdef CONFIG_PAX_ASLR
307 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
308 +
309 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
310 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
311 +#endif
312
313 /* When the program starts, a1 contains a pointer to a function to be
314 registered with atexit, as per the SVR4 ABI. A value of 0 means we
315 diff -urNp linux-2.6.32.42/arch/arm/include/asm/kmap_types.h linux-2.6.32.42/arch/arm/include/asm/kmap_types.h
316 --- linux-2.6.32.42/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
317 +++ linux-2.6.32.42/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
318 @@ -19,6 +19,7 @@ enum km_type {
319 KM_SOFTIRQ0,
320 KM_SOFTIRQ1,
321 KM_L2_CACHE,
322 + KM_CLEARPAGE,
323 KM_TYPE_NR
324 };
325
326 diff -urNp linux-2.6.32.42/arch/arm/include/asm/uaccess.h linux-2.6.32.42/arch/arm/include/asm/uaccess.h
327 --- linux-2.6.32.42/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
328 +++ linux-2.6.32.42/arch/arm/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
329 @@ -403,6 +403,9 @@ extern unsigned long __must_check __strn
330
331 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
332 {
333 + if ((long)n < 0)
334 + return n;
335 +
336 if (access_ok(VERIFY_READ, from, n))
337 n = __copy_from_user(to, from, n);
338 else /* security hole - plug it */
339 @@ -412,6 +415,9 @@ static inline unsigned long __must_check
340
341 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
342 {
343 + if ((long)n < 0)
344 + return n;
345 +
346 if (access_ok(VERIFY_WRITE, to, n))
347 n = __copy_to_user(to, from, n);
348 return n;
349 diff -urNp linux-2.6.32.42/arch/arm/kernel/kgdb.c linux-2.6.32.42/arch/arm/kernel/kgdb.c
350 --- linux-2.6.32.42/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
351 +++ linux-2.6.32.42/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
352 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
353 * and we handle the normal undef case within the do_undefinstr
354 * handler.
355 */
356 -struct kgdb_arch arch_kgdb_ops = {
357 +const struct kgdb_arch arch_kgdb_ops = {
358 #ifndef __ARMEB__
359 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
360 #else /* ! __ARMEB__ */
361 diff -urNp linux-2.6.32.42/arch/arm/kernel/traps.c linux-2.6.32.42/arch/arm/kernel/traps.c
362 --- linux-2.6.32.42/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
363 +++ linux-2.6.32.42/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
364 @@ -247,6 +247,8 @@ static void __die(const char *str, int e
365
366 DEFINE_SPINLOCK(die_lock);
367
368 +extern void gr_handle_kernel_exploit(void);
369 +
370 /*
371 * This function is protected against re-entrancy.
372 */
373 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
374 if (panic_on_oops)
375 panic("Fatal exception");
376
377 + gr_handle_kernel_exploit();
378 +
379 do_exit(SIGSEGV);
380 }
381
382 diff -urNp linux-2.6.32.42/arch/arm/mach-at91/pm.c linux-2.6.32.42/arch/arm/mach-at91/pm.c
383 --- linux-2.6.32.42/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
384 +++ linux-2.6.32.42/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
385 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
386 }
387
388
389 -static struct platform_suspend_ops at91_pm_ops ={
390 +static const struct platform_suspend_ops at91_pm_ops ={
391 .valid = at91_pm_valid_state,
392 .begin = at91_pm_begin,
393 .enter = at91_pm_enter,
394 diff -urNp linux-2.6.32.42/arch/arm/mach-omap1/pm.c linux-2.6.32.42/arch/arm/mach-omap1/pm.c
395 --- linux-2.6.32.42/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
396 +++ linux-2.6.32.42/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
397 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
398
399
400
401 -static struct platform_suspend_ops omap_pm_ops ={
402 +static const struct platform_suspend_ops omap_pm_ops ={
403 .prepare = omap_pm_prepare,
404 .enter = omap_pm_enter,
405 .finish = omap_pm_finish,
406 diff -urNp linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c
407 --- linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
408 +++ linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
409 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
410 enable_hlt();
411 }
412
413 -static struct platform_suspend_ops omap_pm_ops = {
414 +static const struct platform_suspend_ops omap_pm_ops = {
415 .prepare = omap2_pm_prepare,
416 .enter = omap2_pm_enter,
417 .finish = omap2_pm_finish,
418 diff -urNp linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c
419 --- linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
420 +++ linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
421 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
422 return;
423 }
424
425 -static struct platform_suspend_ops omap_pm_ops = {
426 +static const struct platform_suspend_ops omap_pm_ops = {
427 .begin = omap3_pm_begin,
428 .end = omap3_pm_end,
429 .prepare = omap3_pm_prepare,
430 diff -urNp linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c
431 --- linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
432 +++ linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
433 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
434 (state == PM_SUSPEND_MEM);
435 }
436
437 -static struct platform_suspend_ops pnx4008_pm_ops = {
438 +static const struct platform_suspend_ops pnx4008_pm_ops = {
439 .enter = pnx4008_pm_enter,
440 .valid = pnx4008_pm_valid,
441 };
442 diff -urNp linux-2.6.32.42/arch/arm/mach-pxa/pm.c linux-2.6.32.42/arch/arm/mach-pxa/pm.c
443 --- linux-2.6.32.42/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
444 +++ linux-2.6.32.42/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
445 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
446 pxa_cpu_pm_fns->finish();
447 }
448
449 -static struct platform_suspend_ops pxa_pm_ops = {
450 +static const struct platform_suspend_ops pxa_pm_ops = {
451 .valid = pxa_pm_valid,
452 .enter = pxa_pm_enter,
453 .prepare = pxa_pm_prepare,
454 diff -urNp linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c
455 --- linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
456 +++ linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
457 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
458 }
459
460 #ifdef CONFIG_PM
461 -static struct platform_suspend_ops sharpsl_pm_ops = {
462 +static const struct platform_suspend_ops sharpsl_pm_ops = {
463 .prepare = pxa_pm_prepare,
464 .finish = pxa_pm_finish,
465 .enter = corgi_pxa_pm_enter,
466 diff -urNp linux-2.6.32.42/arch/arm/mach-sa1100/pm.c linux-2.6.32.42/arch/arm/mach-sa1100/pm.c
467 --- linux-2.6.32.42/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
468 +++ linux-2.6.32.42/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
469 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
470 return virt_to_phys(sp);
471 }
472
473 -static struct platform_suspend_ops sa11x0_pm_ops = {
474 +static const struct platform_suspend_ops sa11x0_pm_ops = {
475 .enter = sa11x0_pm_enter,
476 .valid = suspend_valid_only_mem,
477 };
478 diff -urNp linux-2.6.32.42/arch/arm/mm/fault.c linux-2.6.32.42/arch/arm/mm/fault.c
479 --- linux-2.6.32.42/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
480 +++ linux-2.6.32.42/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
481 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
482 }
483 #endif
484
485 +#ifdef CONFIG_PAX_PAGEEXEC
486 + if (fsr & FSR_LNX_PF) {
487 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
488 + do_group_exit(SIGKILL);
489 + }
490 +#endif
491 +
492 tsk->thread.address = addr;
493 tsk->thread.error_code = fsr;
494 tsk->thread.trap_no = 14;
495 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
496 }
497 #endif /* CONFIG_MMU */
498
499 +#ifdef CONFIG_PAX_PAGEEXEC
500 +void pax_report_insns(void *pc, void *sp)
501 +{
502 + long i;
503 +
504 + printk(KERN_ERR "PAX: bytes at PC: ");
505 + for (i = 0; i < 20; i++) {
506 + unsigned char c;
507 + if (get_user(c, (__force unsigned char __user *)pc+i))
508 + printk(KERN_CONT "?? ");
509 + else
510 + printk(KERN_CONT "%02x ", c);
511 + }
512 + printk("\n");
513 +
514 + printk(KERN_ERR "PAX: bytes at SP-4: ");
515 + for (i = -1; i < 20; i++) {
516 + unsigned long c;
517 + if (get_user(c, (__force unsigned long __user *)sp+i))
518 + printk(KERN_CONT "???????? ");
519 + else
520 + printk(KERN_CONT "%08lx ", c);
521 + }
522 + printk("\n");
523 +}
524 +#endif
525 +
526 /*
527 * First Level Translation Fault Handler
528 *
529 diff -urNp linux-2.6.32.42/arch/arm/mm/mmap.c linux-2.6.32.42/arch/arm/mm/mmap.c
530 --- linux-2.6.32.42/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
531 +++ linux-2.6.32.42/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
532 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
533 if (len > TASK_SIZE)
534 return -ENOMEM;
535
536 +#ifdef CONFIG_PAX_RANDMMAP
537 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
538 +#endif
539 +
540 if (addr) {
541 if (do_align)
542 addr = COLOUR_ALIGN(addr, pgoff);
543 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
544 addr = PAGE_ALIGN(addr);
545
546 vma = find_vma(mm, addr);
547 - if (TASK_SIZE - len >= addr &&
548 - (!vma || addr + len <= vma->vm_start))
549 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
550 return addr;
551 }
552 if (len > mm->cached_hole_size) {
553 - start_addr = addr = mm->free_area_cache;
554 + start_addr = addr = mm->free_area_cache;
555 } else {
556 - start_addr = addr = TASK_UNMAPPED_BASE;
557 - mm->cached_hole_size = 0;
558 + start_addr = addr = mm->mmap_base;
559 + mm->cached_hole_size = 0;
560 }
561
562 full_search:
563 @@ -94,14 +97,14 @@ full_search:
564 * Start a new search - just in case we missed
565 * some holes.
566 */
567 - if (start_addr != TASK_UNMAPPED_BASE) {
568 - start_addr = addr = TASK_UNMAPPED_BASE;
569 + if (start_addr != mm->mmap_base) {
570 + start_addr = addr = mm->mmap_base;
571 mm->cached_hole_size = 0;
572 goto full_search;
573 }
574 return -ENOMEM;
575 }
576 - if (!vma || addr + len <= vma->vm_start) {
577 + if (check_heap_stack_gap(vma, addr, len)) {
578 /*
579 * Remember the place where we stopped the search:
580 */
581 diff -urNp linux-2.6.32.42/arch/arm/plat-s3c/pm.c linux-2.6.32.42/arch/arm/plat-s3c/pm.c
582 --- linux-2.6.32.42/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
583 +++ linux-2.6.32.42/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
584 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
585 s3c_pm_check_cleanup();
586 }
587
588 -static struct platform_suspend_ops s3c_pm_ops = {
589 +static const struct platform_suspend_ops s3c_pm_ops = {
590 .enter = s3c_pm_enter,
591 .prepare = s3c_pm_prepare,
592 .finish = s3c_pm_finish,
593 diff -urNp linux-2.6.32.42/arch/avr32/include/asm/elf.h linux-2.6.32.42/arch/avr32/include/asm/elf.h
594 --- linux-2.6.32.42/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
595 +++ linux-2.6.32.42/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
596 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
597 the loader. We need to make sure that it is out of the way of the program
598 that it will "exec", and that there is sufficient room for the brk. */
599
600 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
601 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
602
603 +#ifdef CONFIG_PAX_ASLR
604 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
605 +
606 +#define PAX_DELTA_MMAP_LEN 15
607 +#define PAX_DELTA_STACK_LEN 15
608 +#endif
609
610 /* This yields a mask that user programs can use to figure out what
611 instruction set this CPU supports. This could be done in user space,
612 diff -urNp linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h
613 --- linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
614 +++ linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
615 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
616 D(11) KM_IRQ1,
617 D(12) KM_SOFTIRQ0,
618 D(13) KM_SOFTIRQ1,
619 -D(14) KM_TYPE_NR
620 +D(14) KM_CLEARPAGE,
621 +D(15) KM_TYPE_NR
622 };
623
624 #undef D
625 diff -urNp linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c
626 --- linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
627 +++ linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
628 @@ -176,7 +176,7 @@ out:
629 return 0;
630 }
631
632 -static struct platform_suspend_ops avr32_pm_ops = {
633 +static const struct platform_suspend_ops avr32_pm_ops = {
634 .valid = avr32_pm_valid_state,
635 .enter = avr32_pm_enter,
636 };
637 diff -urNp linux-2.6.32.42/arch/avr32/mm/fault.c linux-2.6.32.42/arch/avr32/mm/fault.c
638 --- linux-2.6.32.42/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
639 +++ linux-2.6.32.42/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
640 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
641
642 int exception_trace = 1;
643
644 +#ifdef CONFIG_PAX_PAGEEXEC
645 +void pax_report_insns(void *pc, void *sp)
646 +{
647 + unsigned long i;
648 +
649 + printk(KERN_ERR "PAX: bytes at PC: ");
650 + for (i = 0; i < 20; i++) {
651 + unsigned char c;
652 + if (get_user(c, (unsigned char *)pc+i))
653 + printk(KERN_CONT "???????? ");
654 + else
655 + printk(KERN_CONT "%02x ", c);
656 + }
657 + printk("\n");
658 +}
659 +#endif
660 +
661 /*
662 * This routine handles page faults. It determines the address and the
663 * problem, and then passes it off to one of the appropriate routines.
664 @@ -157,6 +174,16 @@ bad_area:
665 up_read(&mm->mmap_sem);
666
667 if (user_mode(regs)) {
668 +
669 +#ifdef CONFIG_PAX_PAGEEXEC
670 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
671 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
672 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
673 + do_group_exit(SIGKILL);
674 + }
675 + }
676 +#endif
677 +
678 if (exception_trace && printk_ratelimit())
679 printk("%s%s[%d]: segfault at %08lx pc %08lx "
680 "sp %08lx ecr %lu\n",
681 diff -urNp linux-2.6.32.42/arch/blackfin/kernel/kgdb.c linux-2.6.32.42/arch/blackfin/kernel/kgdb.c
682 --- linux-2.6.32.42/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
683 +++ linux-2.6.32.42/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
684 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
685 return -1; /* this means that we do not want to exit from the handler */
686 }
687
688 -struct kgdb_arch arch_kgdb_ops = {
689 +const struct kgdb_arch arch_kgdb_ops = {
690 .gdb_bpt_instr = {0xa1},
691 #ifdef CONFIG_SMP
692 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
693 diff -urNp linux-2.6.32.42/arch/blackfin/mach-common/pm.c linux-2.6.32.42/arch/blackfin/mach-common/pm.c
694 --- linux-2.6.32.42/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
695 +++ linux-2.6.32.42/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
696 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
697 return 0;
698 }
699
700 -struct platform_suspend_ops bfin_pm_ops = {
701 +const struct platform_suspend_ops bfin_pm_ops = {
702 .enter = bfin_pm_enter,
703 .valid = bfin_pm_valid,
704 };
705 diff -urNp linux-2.6.32.42/arch/frv/include/asm/kmap_types.h linux-2.6.32.42/arch/frv/include/asm/kmap_types.h
706 --- linux-2.6.32.42/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
707 +++ linux-2.6.32.42/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
708 @@ -23,6 +23,7 @@ enum km_type {
709 KM_IRQ1,
710 KM_SOFTIRQ0,
711 KM_SOFTIRQ1,
712 + KM_CLEARPAGE,
713 KM_TYPE_NR
714 };
715
716 diff -urNp linux-2.6.32.42/arch/frv/mm/elf-fdpic.c linux-2.6.32.42/arch/frv/mm/elf-fdpic.c
717 --- linux-2.6.32.42/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
718 +++ linux-2.6.32.42/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
719 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
720 if (addr) {
721 addr = PAGE_ALIGN(addr);
722 vma = find_vma(current->mm, addr);
723 - if (TASK_SIZE - len >= addr &&
724 - (!vma || addr + len <= vma->vm_start))
725 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
726 goto success;
727 }
728
729 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
730 for (; vma; vma = vma->vm_next) {
731 if (addr > limit)
732 break;
733 - if (addr + len <= vma->vm_start)
734 + if (check_heap_stack_gap(vma, addr, len))
735 goto success;
736 addr = vma->vm_end;
737 }
738 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742 - if (addr + len <= vma->vm_start)
743 + if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747 diff -urNp linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c
748 --- linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
749 +++ linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
750 @@ -17,7 +17,7 @@
751 #include <linux/swiotlb.h>
752 #include <asm/machvec.h>
753
754 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
755 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
756
757 /* swiotlb declarations & definitions: */
758 extern int swiotlb_late_init_with_default_size (size_t size);
759 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
760 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
761 }
762
763 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
764 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
765 {
766 if (use_swiotlb(dev))
767 return &swiotlb_dma_ops;
768 diff -urNp linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c
769 --- linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
770 +++ linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
771 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
772 },
773 };
774
775 -extern struct dma_map_ops swiotlb_dma_ops;
776 +extern const struct dma_map_ops swiotlb_dma_ops;
777
778 static int __init
779 sba_init(void)
780 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
781
782 __setup("sbapagesize=",sba_page_override);
783
784 -struct dma_map_ops sba_dma_ops = {
785 +const struct dma_map_ops sba_dma_ops = {
786 .alloc_coherent = sba_alloc_coherent,
787 .free_coherent = sba_free_coherent,
788 .map_page = sba_map_page,
789 diff -urNp linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c
790 --- linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
791 +++ linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
792 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
793
794 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
795
796 +#ifdef CONFIG_PAX_ASLR
797 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
798 +
799 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
800 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
801 +#endif
802 +
803 /* Ugly but avoids duplication */
804 #include "../../../fs/binfmt_elf.c"
805
806 diff -urNp linux-2.6.32.42/arch/ia64/ia32/ia32priv.h linux-2.6.32.42/arch/ia64/ia32/ia32priv.h
807 --- linux-2.6.32.42/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
808 +++ linux-2.6.32.42/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
809 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
810 #define ELF_DATA ELFDATA2LSB
811 #define ELF_ARCH EM_386
812
813 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
814 +#ifdef CONFIG_PAX_RANDUSTACK
815 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
816 +#else
817 +#define __IA32_DELTA_STACK 0UL
818 +#endif
819 +
820 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
821 +
822 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
823 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
824
825 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h
826 --- linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
827 +++ linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
828 @@ -12,7 +12,7 @@
829
830 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
831
832 -extern struct dma_map_ops *dma_ops;
833 +extern const struct dma_map_ops *dma_ops;
834 extern struct ia64_machine_vector ia64_mv;
835 extern void set_iommu_machvec(void);
836
837 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
838 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
839 dma_addr_t *daddr, gfp_t gfp)
840 {
841 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
842 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
843 void *caddr;
844
845 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
846 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
847 static inline void dma_free_coherent(struct device *dev, size_t size,
848 void *caddr, dma_addr_t daddr)
849 {
850 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
851 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
852 debug_dma_free_coherent(dev, size, caddr, daddr);
853 ops->free_coherent(dev, size, caddr, daddr);
854 }
855 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
856
857 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
858 {
859 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
860 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
861 return ops->mapping_error(dev, daddr);
862 }
863
864 static inline int dma_supported(struct device *dev, u64 mask)
865 {
866 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
867 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
868 return ops->dma_supported(dev, mask);
869 }
870
871 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/elf.h linux-2.6.32.42/arch/ia64/include/asm/elf.h
872 --- linux-2.6.32.42/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
873 +++ linux-2.6.32.42/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
874 @@ -43,6 +43,13 @@
875 */
876 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
877
878 +#ifdef CONFIG_PAX_ASLR
879 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
880 +
881 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
882 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
883 +#endif
884 +
885 #define PT_IA_64_UNWIND 0x70000001
886
887 /* IA-64 relocations: */
888 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/machvec.h linux-2.6.32.42/arch/ia64/include/asm/machvec.h
889 --- linux-2.6.32.42/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
890 +++ linux-2.6.32.42/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
891 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
892 /* DMA-mapping interface: */
893 typedef void ia64_mv_dma_init (void);
894 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
895 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
896 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
897
898 /*
899 * WARNING: The legacy I/O space is _architected_. Platforms are
900 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
901 # endif /* CONFIG_IA64_GENERIC */
902
903 extern void swiotlb_dma_init(void);
904 -extern struct dma_map_ops *dma_get_ops(struct device *);
905 +extern const struct dma_map_ops *dma_get_ops(struct device *);
906
907 /*
908 * Define default versions so we can extend machvec for new platforms without having
909 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/pgtable.h linux-2.6.32.42/arch/ia64/include/asm/pgtable.h
910 --- linux-2.6.32.42/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
911 +++ linux-2.6.32.42/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
912 @@ -12,7 +12,7 @@
913 * David Mosberger-Tang <davidm@hpl.hp.com>
914 */
915
916 -
917 +#include <linux/const.h>
918 #include <asm/mman.h>
919 #include <asm/page.h>
920 #include <asm/processor.h>
921 @@ -143,6 +143,17 @@
922 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
923 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
924 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
925 +
926 +#ifdef CONFIG_PAX_PAGEEXEC
927 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
928 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
929 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
930 +#else
931 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
932 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
933 +# define PAGE_COPY_NOEXEC PAGE_COPY
934 +#endif
935 +
936 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
937 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
938 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
939 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/spinlock.h linux-2.6.32.42/arch/ia64/include/asm/spinlock.h
940 --- linux-2.6.32.42/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
941 +++ linux-2.6.32.42/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
942 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
943 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
944
945 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
946 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
947 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
948 }
949
950 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
951 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/uaccess.h linux-2.6.32.42/arch/ia64/include/asm/uaccess.h
952 --- linux-2.6.32.42/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
953 +++ linux-2.6.32.42/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
954 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
955 const void *__cu_from = (from); \
956 long __cu_len = (n); \
957 \
958 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
959 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
960 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
961 __cu_len; \
962 })
963 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
964 long __cu_len = (n); \
965 \
966 __chk_user_ptr(__cu_from); \
967 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
968 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
969 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
970 __cu_len; \
971 })
972 diff -urNp linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c
973 --- linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
974 +++ linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
975 @@ -3,7 +3,7 @@
976 /* Set this to 1 if there is a HW IOMMU in the system */
977 int iommu_detected __read_mostly;
978
979 -struct dma_map_ops *dma_ops;
980 +const struct dma_map_ops *dma_ops;
981 EXPORT_SYMBOL(dma_ops);
982
983 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
984 @@ -16,7 +16,7 @@ static int __init dma_init(void)
985 }
986 fs_initcall(dma_init);
987
988 -struct dma_map_ops *dma_get_ops(struct device *dev)
989 +const struct dma_map_ops *dma_get_ops(struct device *dev)
990 {
991 return dma_ops;
992 }
993 diff -urNp linux-2.6.32.42/arch/ia64/kernel/module.c linux-2.6.32.42/arch/ia64/kernel/module.c
994 --- linux-2.6.32.42/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
995 +++ linux-2.6.32.42/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
996 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
997 void
998 module_free (struct module *mod, void *module_region)
999 {
1000 - if (mod && mod->arch.init_unw_table &&
1001 - module_region == mod->module_init) {
1002 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1003 unw_remove_unwind_table(mod->arch.init_unw_table);
1004 mod->arch.init_unw_table = NULL;
1005 }
1006 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1007 }
1008
1009 static inline int
1010 +in_init_rx (const struct module *mod, uint64_t addr)
1011 +{
1012 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1013 +}
1014 +
1015 +static inline int
1016 +in_init_rw (const struct module *mod, uint64_t addr)
1017 +{
1018 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1019 +}
1020 +
1021 +static inline int
1022 in_init (const struct module *mod, uint64_t addr)
1023 {
1024 - return addr - (uint64_t) mod->module_init < mod->init_size;
1025 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1026 +}
1027 +
1028 +static inline int
1029 +in_core_rx (const struct module *mod, uint64_t addr)
1030 +{
1031 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1032 +}
1033 +
1034 +static inline int
1035 +in_core_rw (const struct module *mod, uint64_t addr)
1036 +{
1037 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1038 }
1039
1040 static inline int
1041 in_core (const struct module *mod, uint64_t addr)
1042 {
1043 - return addr - (uint64_t) mod->module_core < mod->core_size;
1044 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1045 }
1046
1047 static inline int
1048 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1049 break;
1050
1051 case RV_BDREL:
1052 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1053 + if (in_init_rx(mod, val))
1054 + val -= (uint64_t) mod->module_init_rx;
1055 + else if (in_init_rw(mod, val))
1056 + val -= (uint64_t) mod->module_init_rw;
1057 + else if (in_core_rx(mod, val))
1058 + val -= (uint64_t) mod->module_core_rx;
1059 + else if (in_core_rw(mod, val))
1060 + val -= (uint64_t) mod->module_core_rw;
1061 break;
1062
1063 case RV_LTV:
1064 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1065 * addresses have been selected...
1066 */
1067 uint64_t gp;
1068 - if (mod->core_size > MAX_LTOFF)
1069 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1070 /*
1071 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1072 * at the end of the module.
1073 */
1074 - gp = mod->core_size - MAX_LTOFF / 2;
1075 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1076 else
1077 - gp = mod->core_size / 2;
1078 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1079 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1080 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1081 mod->arch.gp = gp;
1082 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1083 }
1084 diff -urNp linux-2.6.32.42/arch/ia64/kernel/pci-dma.c linux-2.6.32.42/arch/ia64/kernel/pci-dma.c
1085 --- linux-2.6.32.42/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1086 +++ linux-2.6.32.42/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1087 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1088 .dma_mask = &fallback_dev.coherent_dma_mask,
1089 };
1090
1091 -extern struct dma_map_ops intel_dma_ops;
1092 +extern const struct dma_map_ops intel_dma_ops;
1093
1094 static int __init pci_iommu_init(void)
1095 {
1096 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1097 }
1098 EXPORT_SYMBOL(iommu_dma_supported);
1099
1100 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1101 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1102 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1103 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1104 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1105 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1106 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1107 +
1108 +static const struct dma_map_ops intel_iommu_dma_ops = {
1109 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1110 + .alloc_coherent = intel_alloc_coherent,
1111 + .free_coherent = intel_free_coherent,
1112 + .map_sg = intel_map_sg,
1113 + .unmap_sg = intel_unmap_sg,
1114 + .map_page = intel_map_page,
1115 + .unmap_page = intel_unmap_page,
1116 + .mapping_error = intel_mapping_error,
1117 +
1118 + .sync_single_for_cpu = machvec_dma_sync_single,
1119 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1120 + .sync_single_for_device = machvec_dma_sync_single,
1121 + .sync_sg_for_device = machvec_dma_sync_sg,
1122 + .dma_supported = iommu_dma_supported,
1123 +};
1124 +
1125 void __init pci_iommu_alloc(void)
1126 {
1127 - dma_ops = &intel_dma_ops;
1128 -
1129 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1130 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1131 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1132 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1133 - dma_ops->dma_supported = iommu_dma_supported;
1134 + dma_ops = &intel_iommu_dma_ops;
1135
1136 /*
1137 * The order of these functions is important for
1138 diff -urNp linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c
1139 --- linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1140 +++ linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1141 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1142 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1143 }
1144
1145 -struct dma_map_ops swiotlb_dma_ops = {
1146 +const struct dma_map_ops swiotlb_dma_ops = {
1147 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1148 .free_coherent = swiotlb_free_coherent,
1149 .map_page = swiotlb_map_page,
1150 diff -urNp linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c
1151 --- linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1152 +++ linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1153 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1154 if (REGION_NUMBER(addr) == RGN_HPAGE)
1155 addr = 0;
1156 #endif
1157 +
1158 +#ifdef CONFIG_PAX_RANDMMAP
1159 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1160 + addr = mm->free_area_cache;
1161 + else
1162 +#endif
1163 +
1164 if (!addr)
1165 addr = mm->free_area_cache;
1166
1167 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1168 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1169 /* At this point: (!vma || addr < vma->vm_end). */
1170 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1171 - if (start_addr != TASK_UNMAPPED_BASE) {
1172 + if (start_addr != mm->mmap_base) {
1173 /* Start a new search --- just in case we missed some holes. */
1174 - addr = TASK_UNMAPPED_BASE;
1175 + addr = mm->mmap_base;
1176 goto full_search;
1177 }
1178 return -ENOMEM;
1179 }
1180 - if (!vma || addr + len <= vma->vm_start) {
1181 + if (check_heap_stack_gap(vma, addr, len)) {
1182 /* Remember the address where we stopped this search: */
1183 mm->free_area_cache = addr + len;
1184 return addr;
1185 diff -urNp linux-2.6.32.42/arch/ia64/kernel/topology.c linux-2.6.32.42/arch/ia64/kernel/topology.c
1186 --- linux-2.6.32.42/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1187 +++ linux-2.6.32.42/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1188 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1189 return ret;
1190 }
1191
1192 -static struct sysfs_ops cache_sysfs_ops = {
1193 +static const struct sysfs_ops cache_sysfs_ops = {
1194 .show = cache_show
1195 };
1196
1197 diff -urNp linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S
1198 --- linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1199 +++ linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1200 @@ -190,7 +190,7 @@ SECTIONS
1201 /* Per-cpu data: */
1202 . = ALIGN(PERCPU_PAGE_SIZE);
1203 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1204 - __phys_per_cpu_start = __per_cpu_load;
1205 + __phys_per_cpu_start = per_cpu_load;
1206 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1207 * into percpu page size
1208 */
1209 diff -urNp linux-2.6.32.42/arch/ia64/mm/fault.c linux-2.6.32.42/arch/ia64/mm/fault.c
1210 --- linux-2.6.32.42/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1211 +++ linux-2.6.32.42/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1212 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1213 return pte_present(pte);
1214 }
1215
1216 +#ifdef CONFIG_PAX_PAGEEXEC
1217 +void pax_report_insns(void *pc, void *sp)
1218 +{
1219 + unsigned long i;
1220 +
1221 + printk(KERN_ERR "PAX: bytes at PC: ");
1222 + for (i = 0; i < 8; i++) {
1223 + unsigned int c;
1224 + if (get_user(c, (unsigned int *)pc+i))
1225 + printk(KERN_CONT "???????? ");
1226 + else
1227 + printk(KERN_CONT "%08x ", c);
1228 + }
1229 + printk("\n");
1230 +}
1231 +#endif
1232 +
1233 void __kprobes
1234 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1235 {
1236 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1237 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1238 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1239
1240 - if ((vma->vm_flags & mask) != mask)
1241 + if ((vma->vm_flags & mask) != mask) {
1242 +
1243 +#ifdef CONFIG_PAX_PAGEEXEC
1244 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1245 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1246 + goto bad_area;
1247 +
1248 + up_read(&mm->mmap_sem);
1249 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1250 + do_group_exit(SIGKILL);
1251 + }
1252 +#endif
1253 +
1254 goto bad_area;
1255
1256 + }
1257 +
1258 survive:
1259 /*
1260 * If for any reason at all we couldn't handle the fault, make
1261 diff -urNp linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c
1262 --- linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1263 +++ linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1264 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1265 /* At this point: (!vmm || addr < vmm->vm_end). */
1266 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1267 return -ENOMEM;
1268 - if (!vmm || (addr + len) <= vmm->vm_start)
1269 + if (check_heap_stack_gap(vmm, addr, len))
1270 return addr;
1271 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1272 }
1273 diff -urNp linux-2.6.32.42/arch/ia64/mm/init.c linux-2.6.32.42/arch/ia64/mm/init.c
1274 --- linux-2.6.32.42/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1275 +++ linux-2.6.32.42/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1276 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1277 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1278 vma->vm_end = vma->vm_start + PAGE_SIZE;
1279 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1280 +
1281 +#ifdef CONFIG_PAX_PAGEEXEC
1282 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1283 + vma->vm_flags &= ~VM_EXEC;
1284 +
1285 +#ifdef CONFIG_PAX_MPROTECT
1286 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1287 + vma->vm_flags &= ~VM_MAYEXEC;
1288 +#endif
1289 +
1290 + }
1291 +#endif
1292 +
1293 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1294 down_write(&current->mm->mmap_sem);
1295 if (insert_vm_struct(current->mm, vma)) {
1296 diff -urNp linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c
1297 --- linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1298 +++ linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1299 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1300 return ret;
1301 }
1302
1303 -static struct dma_map_ops sn_dma_ops = {
1304 +static const struct dma_map_ops sn_dma_ops = {
1305 .alloc_coherent = sn_dma_alloc_coherent,
1306 .free_coherent = sn_dma_free_coherent,
1307 .map_page = sn_dma_map_page,
1308 diff -urNp linux-2.6.32.42/arch/m32r/lib/usercopy.c linux-2.6.32.42/arch/m32r/lib/usercopy.c
1309 --- linux-2.6.32.42/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1310 +++ linux-2.6.32.42/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1311 @@ -14,6 +14,9 @@
1312 unsigned long
1313 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1314 {
1315 + if ((long)n < 0)
1316 + return n;
1317 +
1318 prefetch(from);
1319 if (access_ok(VERIFY_WRITE, to, n))
1320 __copy_user(to,from,n);
1321 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1322 unsigned long
1323 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1324 {
1325 + if ((long)n < 0)
1326 + return n;
1327 +
1328 prefetchw(to);
1329 if (access_ok(VERIFY_READ, from, n))
1330 __copy_user_zeroing(to,from,n);
1331 diff -urNp linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c
1332 --- linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1333 +++ linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1334 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1335
1336 }
1337
1338 -static struct platform_suspend_ops db1x_pm_ops = {
1339 +static const struct platform_suspend_ops db1x_pm_ops = {
1340 .valid = suspend_valid_only_mem,
1341 .begin = db1x_pm_begin,
1342 .enter = db1x_pm_enter,
1343 diff -urNp linux-2.6.32.42/arch/mips/include/asm/elf.h linux-2.6.32.42/arch/mips/include/asm/elf.h
1344 --- linux-2.6.32.42/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1345 +++ linux-2.6.32.42/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1346 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1347 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1348 #endif
1349
1350 +#ifdef CONFIG_PAX_ASLR
1351 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1352 +
1353 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1354 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1355 +#endif
1356 +
1357 #endif /* _ASM_ELF_H */
1358 diff -urNp linux-2.6.32.42/arch/mips/include/asm/page.h linux-2.6.32.42/arch/mips/include/asm/page.h
1359 --- linux-2.6.32.42/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1360 +++ linux-2.6.32.42/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1361 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1362 #ifdef CONFIG_CPU_MIPS32
1363 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1364 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1365 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1366 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1367 #else
1368 typedef struct { unsigned long long pte; } pte_t;
1369 #define pte_val(x) ((x).pte)
1370 diff -urNp linux-2.6.32.42/arch/mips/include/asm/system.h linux-2.6.32.42/arch/mips/include/asm/system.h
1371 --- linux-2.6.32.42/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1372 +++ linux-2.6.32.42/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1373 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1374 */
1375 #define __ARCH_WANT_UNLOCKED_CTXSW
1376
1377 -extern unsigned long arch_align_stack(unsigned long sp);
1378 +#define arch_align_stack(x) ((x) & ~0xfUL)
1379
1380 #endif /* _ASM_SYSTEM_H */
1381 diff -urNp linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c
1382 --- linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1383 +++ linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1384 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1385 #undef ELF_ET_DYN_BASE
1386 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1387
1388 +#ifdef CONFIG_PAX_ASLR
1389 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1390 +
1391 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1392 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1393 +#endif
1394 +
1395 #include <asm/processor.h>
1396 #include <linux/module.h>
1397 #include <linux/elfcore.h>
1398 diff -urNp linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c
1399 --- linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1400 +++ linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1401 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1402 #undef ELF_ET_DYN_BASE
1403 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1404
1405 +#ifdef CONFIG_PAX_ASLR
1406 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1407 +
1408 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1409 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1410 +#endif
1411 +
1412 #include <asm/processor.h>
1413
1414 /*
1415 diff -urNp linux-2.6.32.42/arch/mips/kernel/kgdb.c linux-2.6.32.42/arch/mips/kernel/kgdb.c
1416 --- linux-2.6.32.42/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1417 +++ linux-2.6.32.42/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1418 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1419 return -1;
1420 }
1421
1422 +/* cannot be const */
1423 struct kgdb_arch arch_kgdb_ops;
1424
1425 /*
1426 diff -urNp linux-2.6.32.42/arch/mips/kernel/process.c linux-2.6.32.42/arch/mips/kernel/process.c
1427 --- linux-2.6.32.42/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1428 +++ linux-2.6.32.42/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1429 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1430 out:
1431 return pc;
1432 }
1433 -
1434 -/*
1435 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1436 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1437 - */
1438 -unsigned long arch_align_stack(unsigned long sp)
1439 -{
1440 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1441 - sp -= get_random_int() & ~PAGE_MASK;
1442 -
1443 - return sp & ALMASK;
1444 -}
1445 diff -urNp linux-2.6.32.42/arch/mips/kernel/syscall.c linux-2.6.32.42/arch/mips/kernel/syscall.c
1446 --- linux-2.6.32.42/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1447 +++ linux-2.6.32.42/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1448 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1449 do_color_align = 0;
1450 if (filp || (flags & MAP_SHARED))
1451 do_color_align = 1;
1452 +
1453 +#ifdef CONFIG_PAX_RANDMMAP
1454 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1455 +#endif
1456 +
1457 if (addr) {
1458 if (do_color_align)
1459 addr = COLOUR_ALIGN(addr, pgoff);
1460 else
1461 addr = PAGE_ALIGN(addr);
1462 vmm = find_vma(current->mm, addr);
1463 - if (task_size - len >= addr &&
1464 - (!vmm || addr + len <= vmm->vm_start))
1465 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1466 return addr;
1467 }
1468 - addr = TASK_UNMAPPED_BASE;
1469 + addr = current->mm->mmap_base;
1470 if (do_color_align)
1471 addr = COLOUR_ALIGN(addr, pgoff);
1472 else
1473 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1474 /* At this point: (!vmm || addr < vmm->vm_end). */
1475 if (task_size - len < addr)
1476 return -ENOMEM;
1477 - if (!vmm || addr + len <= vmm->vm_start)
1478 + if (check_heap_stack_gap(vmm, addr, len))
1479 return addr;
1480 addr = vmm->vm_end;
1481 if (do_color_align)
1482 diff -urNp linux-2.6.32.42/arch/mips/mm/fault.c linux-2.6.32.42/arch/mips/mm/fault.c
1483 --- linux-2.6.32.42/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1484 +++ linux-2.6.32.42/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1485 @@ -26,6 +26,23 @@
1486 #include <asm/ptrace.h>
1487 #include <asm/highmem.h> /* For VMALLOC_END */
1488
1489 +#ifdef CONFIG_PAX_PAGEEXEC
1490 +void pax_report_insns(void *pc, void *sp)
1491 +{
1492 + unsigned long i;
1493 +
1494 + printk(KERN_ERR "PAX: bytes at PC: ");
1495 + for (i = 0; i < 5; i++) {
1496 + unsigned int c;
1497 + if (get_user(c, (unsigned int *)pc+i))
1498 + printk(KERN_CONT "???????? ");
1499 + else
1500 + printk(KERN_CONT "%08x ", c);
1501 + }
1502 + printk("\n");
1503 +}
1504 +#endif
1505 +
1506 /*
1507 * This routine handles page faults. It determines the address,
1508 * and the problem, and then passes it off to one of the appropriate
1509 diff -urNp linux-2.6.32.42/arch/parisc/include/asm/elf.h linux-2.6.32.42/arch/parisc/include/asm/elf.h
1510 --- linux-2.6.32.42/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1511 +++ linux-2.6.32.42/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1512 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1513
1514 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1515
1516 +#ifdef CONFIG_PAX_ASLR
1517 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1518 +
1519 +#define PAX_DELTA_MMAP_LEN 16
1520 +#define PAX_DELTA_STACK_LEN 16
1521 +#endif
1522 +
1523 /* This yields a mask that user programs can use to figure out what
1524 instruction set this CPU supports. This could be done in user space,
1525 but it's not easy, and we've already done it here. */
1526 diff -urNp linux-2.6.32.42/arch/parisc/include/asm/pgtable.h linux-2.6.32.42/arch/parisc/include/asm/pgtable.h
1527 --- linux-2.6.32.42/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1528 +++ linux-2.6.32.42/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1529 @@ -207,6 +207,17 @@
1530 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1531 #define PAGE_COPY PAGE_EXECREAD
1532 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1533 +
1534 +#ifdef CONFIG_PAX_PAGEEXEC
1535 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1536 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1537 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1538 +#else
1539 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1540 +# define PAGE_COPY_NOEXEC PAGE_COPY
1541 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1542 +#endif
1543 +
1544 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1545 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1546 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1547 diff -urNp linux-2.6.32.42/arch/parisc/kernel/module.c linux-2.6.32.42/arch/parisc/kernel/module.c
1548 --- linux-2.6.32.42/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1549 +++ linux-2.6.32.42/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1550 @@ -95,16 +95,38 @@
1551
1552 /* three functions to determine where in the module core
1553 * or init pieces the location is */
1554 +static inline int in_init_rx(struct module *me, void *loc)
1555 +{
1556 + return (loc >= me->module_init_rx &&
1557 + loc < (me->module_init_rx + me->init_size_rx));
1558 +}
1559 +
1560 +static inline int in_init_rw(struct module *me, void *loc)
1561 +{
1562 + return (loc >= me->module_init_rw &&
1563 + loc < (me->module_init_rw + me->init_size_rw));
1564 +}
1565 +
1566 static inline int in_init(struct module *me, void *loc)
1567 {
1568 - return (loc >= me->module_init &&
1569 - loc <= (me->module_init + me->init_size));
1570 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1571 +}
1572 +
1573 +static inline int in_core_rx(struct module *me, void *loc)
1574 +{
1575 + return (loc >= me->module_core_rx &&
1576 + loc < (me->module_core_rx + me->core_size_rx));
1577 +}
1578 +
1579 +static inline int in_core_rw(struct module *me, void *loc)
1580 +{
1581 + return (loc >= me->module_core_rw &&
1582 + loc < (me->module_core_rw + me->core_size_rw));
1583 }
1584
1585 static inline int in_core(struct module *me, void *loc)
1586 {
1587 - return (loc >= me->module_core &&
1588 - loc <= (me->module_core + me->core_size));
1589 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1590 }
1591
1592 static inline int in_local(struct module *me, void *loc)
1593 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1594 }
1595
1596 /* align things a bit */
1597 - me->core_size = ALIGN(me->core_size, 16);
1598 - me->arch.got_offset = me->core_size;
1599 - me->core_size += gots * sizeof(struct got_entry);
1600 -
1601 - me->core_size = ALIGN(me->core_size, 16);
1602 - me->arch.fdesc_offset = me->core_size;
1603 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1604 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1605 + me->arch.got_offset = me->core_size_rw;
1606 + me->core_size_rw += gots * sizeof(struct got_entry);
1607 +
1608 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1609 + me->arch.fdesc_offset = me->core_size_rw;
1610 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1611
1612 me->arch.got_max = gots;
1613 me->arch.fdesc_max = fdescs;
1614 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1615
1616 BUG_ON(value == 0);
1617
1618 - got = me->module_core + me->arch.got_offset;
1619 + got = me->module_core_rw + me->arch.got_offset;
1620 for (i = 0; got[i].addr; i++)
1621 if (got[i].addr == value)
1622 goto out;
1623 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1624 #ifdef CONFIG_64BIT
1625 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1626 {
1627 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1628 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1629
1630 if (!value) {
1631 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1632 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1633
1634 /* Create new one */
1635 fdesc->addr = value;
1636 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1637 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1638 return (Elf_Addr)fdesc;
1639 }
1640 #endif /* CONFIG_64BIT */
1641 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1642
1643 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1644 end = table + sechdrs[me->arch.unwind_section].sh_size;
1645 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1646 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1647
1648 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1649 me->arch.unwind_section, table, end, gp);
1650 diff -urNp linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c
1651 --- linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1652 +++ linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1653 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1654 /* At this point: (!vma || addr < vma->vm_end). */
1655 if (TASK_SIZE - len < addr)
1656 return -ENOMEM;
1657 - if (!vma || addr + len <= vma->vm_start)
1658 + if (check_heap_stack_gap(vma, addr, len))
1659 return addr;
1660 addr = vma->vm_end;
1661 }
1662 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1663 /* At this point: (!vma || addr < vma->vm_end). */
1664 if (TASK_SIZE - len < addr)
1665 return -ENOMEM;
1666 - if (!vma || addr + len <= vma->vm_start)
1667 + if (check_heap_stack_gap(vma, addr, len))
1668 return addr;
1669 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1670 if (addr < vma->vm_end) /* handle wraparound */
1671 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1672 if (flags & MAP_FIXED)
1673 return addr;
1674 if (!addr)
1675 - addr = TASK_UNMAPPED_BASE;
1676 + addr = current->mm->mmap_base;
1677
1678 if (filp) {
1679 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1680 diff -urNp linux-2.6.32.42/arch/parisc/kernel/traps.c linux-2.6.32.42/arch/parisc/kernel/traps.c
1681 --- linux-2.6.32.42/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1682 +++ linux-2.6.32.42/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1683 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1684
1685 down_read(&current->mm->mmap_sem);
1686 vma = find_vma(current->mm,regs->iaoq[0]);
1687 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1688 - && (vma->vm_flags & VM_EXEC)) {
1689 -
1690 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1691 fault_address = regs->iaoq[0];
1692 fault_space = regs->iasq[0];
1693
1694 diff -urNp linux-2.6.32.42/arch/parisc/mm/fault.c linux-2.6.32.42/arch/parisc/mm/fault.c
1695 --- linux-2.6.32.42/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1696 +++ linux-2.6.32.42/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1697 @@ -15,6 +15,7 @@
1698 #include <linux/sched.h>
1699 #include <linux/interrupt.h>
1700 #include <linux/module.h>
1701 +#include <linux/unistd.h>
1702
1703 #include <asm/uaccess.h>
1704 #include <asm/traps.h>
1705 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1706 static unsigned long
1707 parisc_acctyp(unsigned long code, unsigned int inst)
1708 {
1709 - if (code == 6 || code == 16)
1710 + if (code == 6 || code == 7 || code == 16)
1711 return VM_EXEC;
1712
1713 switch (inst & 0xf0000000) {
1714 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1715 }
1716 #endif
1717
1718 +#ifdef CONFIG_PAX_PAGEEXEC
1719 +/*
1720 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1721 + *
1722 + * returns 1 when task should be killed
1723 + * 2 when rt_sigreturn trampoline was detected
1724 + * 3 when unpatched PLT trampoline was detected
1725 + */
1726 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1727 +{
1728 +
1729 +#ifdef CONFIG_PAX_EMUPLT
1730 + int err;
1731 +
1732 + do { /* PaX: unpatched PLT emulation */
1733 + unsigned int bl, depwi;
1734 +
1735 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1736 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1737 +
1738 + if (err)
1739 + break;
1740 +
1741 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1742 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1743 +
1744 + err = get_user(ldw, (unsigned int *)addr);
1745 + err |= get_user(bv, (unsigned int *)(addr+4));
1746 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1747 +
1748 + if (err)
1749 + break;
1750 +
1751 + if (ldw == 0x0E801096U &&
1752 + bv == 0xEAC0C000U &&
1753 + ldw2 == 0x0E881095U)
1754 + {
1755 + unsigned int resolver, map;
1756 +
1757 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1758 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1759 + if (err)
1760 + break;
1761 +
1762 + regs->gr[20] = instruction_pointer(regs)+8;
1763 + regs->gr[21] = map;
1764 + regs->gr[22] = resolver;
1765 + regs->iaoq[0] = resolver | 3UL;
1766 + regs->iaoq[1] = regs->iaoq[0] + 4;
1767 + return 3;
1768 + }
1769 + }
1770 + } while (0);
1771 +#endif
1772 +
1773 +#ifdef CONFIG_PAX_EMUTRAMP
1774 +
1775 +#ifndef CONFIG_PAX_EMUSIGRT
1776 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1777 + return 1;
1778 +#endif
1779 +
1780 + do { /* PaX: rt_sigreturn emulation */
1781 + unsigned int ldi1, ldi2, bel, nop;
1782 +
1783 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1784 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1785 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1786 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1787 +
1788 + if (err)
1789 + break;
1790 +
1791 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1792 + ldi2 == 0x3414015AU &&
1793 + bel == 0xE4008200U &&
1794 + nop == 0x08000240U)
1795 + {
1796 + regs->gr[25] = (ldi1 & 2) >> 1;
1797 + regs->gr[20] = __NR_rt_sigreturn;
1798 + regs->gr[31] = regs->iaoq[1] + 16;
1799 + regs->sr[0] = regs->iasq[1];
1800 + regs->iaoq[0] = 0x100UL;
1801 + regs->iaoq[1] = regs->iaoq[0] + 4;
1802 + regs->iasq[0] = regs->sr[2];
1803 + regs->iasq[1] = regs->sr[2];
1804 + return 2;
1805 + }
1806 + } while (0);
1807 +#endif
1808 +
1809 + return 1;
1810 +}
1811 +
1812 +void pax_report_insns(void *pc, void *sp)
1813 +{
1814 + unsigned long i;
1815 +
1816 + printk(KERN_ERR "PAX: bytes at PC: ");
1817 + for (i = 0; i < 5; i++) {
1818 + unsigned int c;
1819 + if (get_user(c, (unsigned int *)pc+i))
1820 + printk(KERN_CONT "???????? ");
1821 + else
1822 + printk(KERN_CONT "%08x ", c);
1823 + }
1824 + printk("\n");
1825 +}
1826 +#endif
1827 +
1828 int fixup_exception(struct pt_regs *regs)
1829 {
1830 const struct exception_table_entry *fix;
1831 @@ -192,8 +303,33 @@ good_area:
1832
1833 acc_type = parisc_acctyp(code,regs->iir);
1834
1835 - if ((vma->vm_flags & acc_type) != acc_type)
1836 + if ((vma->vm_flags & acc_type) != acc_type) {
1837 +
1838 +#ifdef CONFIG_PAX_PAGEEXEC
1839 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1840 + (address & ~3UL) == instruction_pointer(regs))
1841 + {
1842 + up_read(&mm->mmap_sem);
1843 + switch (pax_handle_fetch_fault(regs)) {
1844 +
1845 +#ifdef CONFIG_PAX_EMUPLT
1846 + case 3:
1847 + return;
1848 +#endif
1849 +
1850 +#ifdef CONFIG_PAX_EMUTRAMP
1851 + case 2:
1852 + return;
1853 +#endif
1854 +
1855 + }
1856 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1857 + do_group_exit(SIGKILL);
1858 + }
1859 +#endif
1860 +
1861 goto bad_area;
1862 + }
1863
1864 /*
1865 * If for any reason at all we couldn't handle the fault, make
1866 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/device.h linux-2.6.32.42/arch/powerpc/include/asm/device.h
1867 --- linux-2.6.32.42/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
1868 +++ linux-2.6.32.42/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
1869 @@ -14,7 +14,7 @@ struct dev_archdata {
1870 struct device_node *of_node;
1871
1872 /* DMA operations on that device */
1873 - struct dma_map_ops *dma_ops;
1874 + const struct dma_map_ops *dma_ops;
1875
1876 /*
1877 * When an iommu is in use, dma_data is used as a ptr to the base of the
1878 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h
1879 --- linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
1880 +++ linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
1881 @@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
1882 #ifdef CONFIG_PPC64
1883 extern struct dma_map_ops dma_iommu_ops;
1884 #endif
1885 -extern struct dma_map_ops dma_direct_ops;
1886 +extern const struct dma_map_ops dma_direct_ops;
1887
1888 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
1889 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
1890 {
1891 /* We don't handle the NULL dev case for ISA for now. We could
1892 * do it via an out of line call but it is not needed for now. The
1893 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
1894 return dev->archdata.dma_ops;
1895 }
1896
1897 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
1898 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
1899 {
1900 dev->archdata.dma_ops = ops;
1901 }
1902 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
1903
1904 static inline int dma_supported(struct device *dev, u64 mask)
1905 {
1906 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1907 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1908
1909 if (unlikely(dma_ops == NULL))
1910 return 0;
1911 @@ -132,7 +132,7 @@ static inline int dma_supported(struct d
1912
1913 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
1914 {
1915 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1916 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1917
1918 if (unlikely(dma_ops == NULL))
1919 return -EIO;
1920 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
1921 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1922 dma_addr_t *dma_handle, gfp_t flag)
1923 {
1924 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1925 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1926 void *cpu_addr;
1927
1928 BUG_ON(!dma_ops);
1929 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
1930 static inline void dma_free_coherent(struct device *dev, size_t size,
1931 void *cpu_addr, dma_addr_t dma_handle)
1932 {
1933 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1934 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1935
1936 BUG_ON(!dma_ops);
1937
1938 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
1939
1940 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1941 {
1942 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1943 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1944
1945 if (dma_ops->mapping_error)
1946 return dma_ops->mapping_error(dev, dma_addr);
1947 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/elf.h linux-2.6.32.42/arch/powerpc/include/asm/elf.h
1948 --- linux-2.6.32.42/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1949 +++ linux-2.6.32.42/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1950 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1951 the loader. We need to make sure that it is out of the way of the program
1952 that it will "exec", and that there is sufficient room for the brk. */
1953
1954 -extern unsigned long randomize_et_dyn(unsigned long base);
1955 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1956 +#define ELF_ET_DYN_BASE (0x20000000)
1957 +
1958 +#ifdef CONFIG_PAX_ASLR
1959 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1960 +
1961 +#ifdef __powerpc64__
1962 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
1963 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
1964 +#else
1965 +#define PAX_DELTA_MMAP_LEN 15
1966 +#define PAX_DELTA_STACK_LEN 15
1967 +#endif
1968 +#endif
1969
1970 /*
1971 * Our registers are always unsigned longs, whether we're a 32 bit
1972 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
1973 (0x7ff >> (PAGE_SHIFT - 12)) : \
1974 (0x3ffff >> (PAGE_SHIFT - 12)))
1975
1976 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1977 -#define arch_randomize_brk arch_randomize_brk
1978 -
1979 #endif /* __KERNEL__ */
1980
1981 /*
1982 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/iommu.h linux-2.6.32.42/arch/powerpc/include/asm/iommu.h
1983 --- linux-2.6.32.42/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
1984 +++ linux-2.6.32.42/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
1985 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
1986 extern void iommu_init_early_dart(void);
1987 extern void iommu_init_early_pasemi(void);
1988
1989 +/* dma-iommu.c */
1990 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
1991 +
1992 #ifdef CONFIG_PCI
1993 extern void pci_iommu_init(void);
1994 extern void pci_direct_iommu_init(void);
1995 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h
1996 --- linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
1997 +++ linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
1998 @@ -26,6 +26,7 @@ enum km_type {
1999 KM_SOFTIRQ1,
2000 KM_PPC_SYNC_PAGE,
2001 KM_PPC_SYNC_ICACHE,
2002 + KM_CLEARPAGE,
2003 KM_TYPE_NR
2004 };
2005
2006 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/page_64.h linux-2.6.32.42/arch/powerpc/include/asm/page_64.h
2007 --- linux-2.6.32.42/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2008 +++ linux-2.6.32.42/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2009 @@ -180,15 +180,18 @@ do { \
2010 * stack by default, so in the absense of a PT_GNU_STACK program header
2011 * we turn execute permission off.
2012 */
2013 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2014 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2015 +#define VM_STACK_DEFAULT_FLAGS32 \
2016 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2017 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2018
2019 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2020 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2021
2022 +#ifndef CONFIG_PAX_PAGEEXEC
2023 #define VM_STACK_DEFAULT_FLAGS \
2024 (test_thread_flag(TIF_32BIT) ? \
2025 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2026 +#endif
2027
2028 #include <asm-generic/getorder.h>
2029
2030 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/page.h linux-2.6.32.42/arch/powerpc/include/asm/page.h
2031 --- linux-2.6.32.42/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2032 +++ linux-2.6.32.42/arch/powerpc/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
2033 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2034 * and needs to be executable. This means the whole heap ends
2035 * up being executable.
2036 */
2037 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2038 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2039 +#define VM_DATA_DEFAULT_FLAGS32 \
2040 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2041 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2042
2043 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2044 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2045 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2046 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2047 #endif
2048
2049 +#define ktla_ktva(addr) (addr)
2050 +#define ktva_ktla(addr) (addr)
2051 +
2052 #ifndef __ASSEMBLY__
2053
2054 #undef STRICT_MM_TYPECHECKS
2055 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/pci.h linux-2.6.32.42/arch/powerpc/include/asm/pci.h
2056 --- linux-2.6.32.42/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2057 +++ linux-2.6.32.42/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2058 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2059 }
2060
2061 #ifdef CONFIG_PCI
2062 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2063 -extern struct dma_map_ops *get_pci_dma_ops(void);
2064 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2065 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2066 #else /* CONFIG_PCI */
2067 #define set_pci_dma_ops(d)
2068 #define get_pci_dma_ops() NULL
2069 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h
2070 --- linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2071 +++ linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2072 @@ -2,6 +2,7 @@
2073 #define _ASM_POWERPC_PGTABLE_H
2074 #ifdef __KERNEL__
2075
2076 +#include <linux/const.h>
2077 #ifndef __ASSEMBLY__
2078 #include <asm/processor.h> /* For TASK_SIZE */
2079 #include <asm/mmu.h>
2080 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h
2081 --- linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2082 +++ linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2083 @@ -21,6 +21,7 @@
2084 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2085 #define _PAGE_USER 0x004 /* usermode access allowed */
2086 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2087 +#define _PAGE_EXEC _PAGE_GUARDED
2088 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2089 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2090 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2091 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/reg.h linux-2.6.32.42/arch/powerpc/include/asm/reg.h
2092 --- linux-2.6.32.42/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2093 +++ linux-2.6.32.42/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2094 @@ -191,6 +191,7 @@
2095 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2096 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2097 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2098 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2099 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2100 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2101 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2102 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h
2103 --- linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2104 +++ linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2105 @@ -13,7 +13,7 @@
2106
2107 #include <linux/swiotlb.h>
2108
2109 -extern struct dma_map_ops swiotlb_dma_ops;
2110 +extern const struct dma_map_ops swiotlb_dma_ops;
2111
2112 static inline void dma_mark_clean(void *addr, size_t size) {}
2113
2114 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/system.h linux-2.6.32.42/arch/powerpc/include/asm/system.h
2115 --- linux-2.6.32.42/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2116 +++ linux-2.6.32.42/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2117 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2118 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2119 #endif
2120
2121 -extern unsigned long arch_align_stack(unsigned long sp);
2122 +#define arch_align_stack(x) ((x) & ~0xfUL)
2123
2124 /* Used in very early kernel initialization. */
2125 extern unsigned long reloc_offset(void);
2126 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h
2127 --- linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2128 +++ linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2129 @@ -13,6 +13,8 @@
2130 #define VERIFY_READ 0
2131 #define VERIFY_WRITE 1
2132
2133 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2134 +
2135 /*
2136 * The fs value determines whether argument validity checking should be
2137 * performed or not. If get_fs() == USER_DS, checking is performed, with
2138 @@ -327,52 +329,6 @@ do { \
2139 extern unsigned long __copy_tofrom_user(void __user *to,
2140 const void __user *from, unsigned long size);
2141
2142 -#ifndef __powerpc64__
2143 -
2144 -static inline unsigned long copy_from_user(void *to,
2145 - const void __user *from, unsigned long n)
2146 -{
2147 - unsigned long over;
2148 -
2149 - if (access_ok(VERIFY_READ, from, n))
2150 - return __copy_tofrom_user((__force void __user *)to, from, n);
2151 - if ((unsigned long)from < TASK_SIZE) {
2152 - over = (unsigned long)from + n - TASK_SIZE;
2153 - return __copy_tofrom_user((__force void __user *)to, from,
2154 - n - over) + over;
2155 - }
2156 - return n;
2157 -}
2158 -
2159 -static inline unsigned long copy_to_user(void __user *to,
2160 - const void *from, unsigned long n)
2161 -{
2162 - unsigned long over;
2163 -
2164 - if (access_ok(VERIFY_WRITE, to, n))
2165 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2166 - if ((unsigned long)to < TASK_SIZE) {
2167 - over = (unsigned long)to + n - TASK_SIZE;
2168 - return __copy_tofrom_user(to, (__force void __user *)from,
2169 - n - over) + over;
2170 - }
2171 - return n;
2172 -}
2173 -
2174 -#else /* __powerpc64__ */
2175 -
2176 -#define __copy_in_user(to, from, size) \
2177 - __copy_tofrom_user((to), (from), (size))
2178 -
2179 -extern unsigned long copy_from_user(void *to, const void __user *from,
2180 - unsigned long n);
2181 -extern unsigned long copy_to_user(void __user *to, const void *from,
2182 - unsigned long n);
2183 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2184 - unsigned long n);
2185 -
2186 -#endif /* __powerpc64__ */
2187 -
2188 static inline unsigned long __copy_from_user_inatomic(void *to,
2189 const void __user *from, unsigned long n)
2190 {
2191 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2192 if (ret == 0)
2193 return 0;
2194 }
2195 +
2196 + if (!__builtin_constant_p(n))
2197 + check_object_size(to, n, false);
2198 +
2199 return __copy_tofrom_user((__force void __user *)to, from, n);
2200 }
2201
2202 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2203 if (ret == 0)
2204 return 0;
2205 }
2206 +
2207 + if (!__builtin_constant_p(n))
2208 + check_object_size(from, n, true);
2209 +
2210 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2211 }
2212
2213 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2214 return __copy_to_user_inatomic(to, from, size);
2215 }
2216
2217 +#ifndef __powerpc64__
2218 +
2219 +static inline unsigned long __must_check copy_from_user(void *to,
2220 + const void __user *from, unsigned long n)
2221 +{
2222 + unsigned long over;
2223 +
2224 + if ((long)n < 0)
2225 + return n;
2226 +
2227 + if (access_ok(VERIFY_READ, from, n)) {
2228 + if (!__builtin_constant_p(n))
2229 + check_object_size(to, n, false);
2230 + return __copy_tofrom_user((__force void __user *)to, from, n);
2231 + }
2232 + if ((unsigned long)from < TASK_SIZE) {
2233 + over = (unsigned long)from + n - TASK_SIZE;
2234 + if (!__builtin_constant_p(n - over))
2235 + check_object_size(to, n - over, false);
2236 + return __copy_tofrom_user((__force void __user *)to, from,
2237 + n - over) + over;
2238 + }
2239 + return n;
2240 +}
2241 +
2242 +static inline unsigned long __must_check copy_to_user(void __user *to,
2243 + const void *from, unsigned long n)
2244 +{
2245 + unsigned long over;
2246 +
2247 + if ((long)n < 0)
2248 + return n;
2249 +
2250 + if (access_ok(VERIFY_WRITE, to, n)) {
2251 + if (!__builtin_constant_p(n))
2252 + check_object_size(from, n, true);
2253 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2254 + }
2255 + if ((unsigned long)to < TASK_SIZE) {
2256 + over = (unsigned long)to + n - TASK_SIZE;
2257 + if (!__builtin_constant_p(n))
2258 + check_object_size(from, n - over, true);
2259 + return __copy_tofrom_user(to, (__force void __user *)from,
2260 + n - over) + over;
2261 + }
2262 + return n;
2263 +}
2264 +
2265 +#else /* __powerpc64__ */
2266 +
2267 +#define __copy_in_user(to, from, size) \
2268 + __copy_tofrom_user((to), (from), (size))
2269 +
2270 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2271 +{
2272 + if ((long)n < 0 || n > INT_MAX)
2273 + return n;
2274 +
2275 + if (!__builtin_constant_p(n))
2276 + check_object_size(to, n, false);
2277 +
2278 + if (likely(access_ok(VERIFY_READ, from, n)))
2279 + n = __copy_from_user(to, from, n);
2280 + else
2281 + memset(to, 0, n);
2282 + return n;
2283 +}
2284 +
2285 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2286 +{
2287 + if ((long)n < 0 || n > INT_MAX)
2288 + return n;
2289 +
2290 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2291 + if (!__builtin_constant_p(n))
2292 + check_object_size(from, n, true);
2293 + n = __copy_to_user(to, from, n);
2294 + }
2295 + return n;
2296 +}
2297 +
2298 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2299 + unsigned long n);
2300 +
2301 +#endif /* __powerpc64__ */
2302 +
2303 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2304
2305 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2306 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c
2307 --- linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2308 +++ linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2309 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2310 &cache_assoc_attr,
2311 };
2312
2313 -static struct sysfs_ops cache_index_ops = {
2314 +static const struct sysfs_ops cache_index_ops = {
2315 .show = cache_index_show,
2316 };
2317
2318 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/dma.c linux-2.6.32.42/arch/powerpc/kernel/dma.c
2319 --- linux-2.6.32.42/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2320 +++ linux-2.6.32.42/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2321 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2322 }
2323 #endif
2324
2325 -struct dma_map_ops dma_direct_ops = {
2326 +const struct dma_map_ops dma_direct_ops = {
2327 .alloc_coherent = dma_direct_alloc_coherent,
2328 .free_coherent = dma_direct_free_coherent,
2329 .map_sg = dma_direct_map_sg,
2330 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c
2331 --- linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2332 +++ linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2333 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2334 }
2335
2336 /* We support DMA to/from any memory page via the iommu */
2337 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2338 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
2339 {
2340 struct iommu_table *tbl = get_iommu_table_base(dev);
2341
2342 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c
2343 --- linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2344 +++ linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2345 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2346 * map_page, and unmap_page on highmem, use normal dma_ops
2347 * for everything else.
2348 */
2349 -struct dma_map_ops swiotlb_dma_ops = {
2350 +const struct dma_map_ops swiotlb_dma_ops = {
2351 .alloc_coherent = dma_direct_alloc_coherent,
2352 .free_coherent = dma_direct_free_coherent,
2353 .map_sg = swiotlb_map_sg_attrs,
2354 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S
2355 --- linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2356 +++ linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2357 @@ -455,6 +455,7 @@ storage_fault_common:
2358 std r14,_DAR(r1)
2359 std r15,_DSISR(r1)
2360 addi r3,r1,STACK_FRAME_OVERHEAD
2361 + bl .save_nvgprs
2362 mr r4,r14
2363 mr r5,r15
2364 ld r14,PACA_EXGEN+EX_R14(r13)
2365 @@ -464,8 +465,7 @@ storage_fault_common:
2366 cmpdi r3,0
2367 bne- 1f
2368 b .ret_from_except_lite
2369 -1: bl .save_nvgprs
2370 - mr r5,r3
2371 +1: mr r5,r3
2372 addi r3,r1,STACK_FRAME_OVERHEAD
2373 ld r4,_DAR(r1)
2374 bl .bad_page_fault
2375 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S
2376 --- linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2377 +++ linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2378 @@ -818,10 +818,10 @@ handle_page_fault:
2379 11: ld r4,_DAR(r1)
2380 ld r5,_DSISR(r1)
2381 addi r3,r1,STACK_FRAME_OVERHEAD
2382 + bl .save_nvgprs
2383 bl .do_page_fault
2384 cmpdi r3,0
2385 beq+ 13f
2386 - bl .save_nvgprs
2387 mr r5,r3
2388 addi r3,r1,STACK_FRAME_OVERHEAD
2389 lwz r4,_DAR(r1)
2390 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c
2391 --- linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2392 +++ linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2393 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2394 return 1;
2395 }
2396
2397 -static struct dma_map_ops ibmebus_dma_ops = {
2398 +static const struct dma_map_ops ibmebus_dma_ops = {
2399 .alloc_coherent = ibmebus_alloc_coherent,
2400 .free_coherent = ibmebus_free_coherent,
2401 .map_sg = ibmebus_map_sg,
2402 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/kgdb.c linux-2.6.32.42/arch/powerpc/kernel/kgdb.c
2403 --- linux-2.6.32.42/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2404 +++ linux-2.6.32.42/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2405 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2406 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2407 return 0;
2408
2409 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2410 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2411 regs->nip += 4;
2412
2413 return 1;
2414 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2415 /*
2416 * Global data
2417 */
2418 -struct kgdb_arch arch_kgdb_ops = {
2419 +const struct kgdb_arch arch_kgdb_ops = {
2420 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2421 };
2422
2423 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/module_32.c linux-2.6.32.42/arch/powerpc/kernel/module_32.c
2424 --- linux-2.6.32.42/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2425 +++ linux-2.6.32.42/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2426 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2427 me->arch.core_plt_section = i;
2428 }
2429 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2430 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2431 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2432 return -ENOEXEC;
2433 }
2434
2435 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2436
2437 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2438 /* Init, or core PLT? */
2439 - if (location >= mod->module_core
2440 - && location < mod->module_core + mod->core_size)
2441 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2442 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2443 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2444 - else
2445 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2446 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2447 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2448 + else {
2449 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2450 + return ~0UL;
2451 + }
2452
2453 /* Find this entry, or if that fails, the next avail. entry */
2454 while (entry->jump[0]) {
2455 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/module.c linux-2.6.32.42/arch/powerpc/kernel/module.c
2456 --- linux-2.6.32.42/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2457 +++ linux-2.6.32.42/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2458 @@ -31,11 +31,24 @@
2459
2460 LIST_HEAD(module_bug_list);
2461
2462 +#ifdef CONFIG_PAX_KERNEXEC
2463 void *module_alloc(unsigned long size)
2464 {
2465 if (size == 0)
2466 return NULL;
2467
2468 + return vmalloc(size);
2469 +}
2470 +
2471 +void *module_alloc_exec(unsigned long size)
2472 +#else
2473 +void *module_alloc(unsigned long size)
2474 +#endif
2475 +
2476 +{
2477 + if (size == 0)
2478 + return NULL;
2479 +
2480 return vmalloc_exec(size);
2481 }
2482
2483 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2484 vfree(module_region);
2485 }
2486
2487 +#ifdef CONFIG_PAX_KERNEXEC
2488 +void module_free_exec(struct module *mod, void *module_region)
2489 +{
2490 + module_free(mod, module_region);
2491 +}
2492 +#endif
2493 +
2494 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2495 const Elf_Shdr *sechdrs,
2496 const char *name)
2497 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/pci-common.c linux-2.6.32.42/arch/powerpc/kernel/pci-common.c
2498 --- linux-2.6.32.42/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2499 +++ linux-2.6.32.42/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2500 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2501 unsigned int ppc_pci_flags = 0;
2502
2503
2504 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2505 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2506
2507 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2508 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2509 {
2510 pci_dma_ops = dma_ops;
2511 }
2512
2513 -struct dma_map_ops *get_pci_dma_ops(void)
2514 +const struct dma_map_ops *get_pci_dma_ops(void)
2515 {
2516 return pci_dma_ops;
2517 }
2518 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/process.c linux-2.6.32.42/arch/powerpc/kernel/process.c
2519 --- linux-2.6.32.42/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2520 +++ linux-2.6.32.42/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2521 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2522 * Lookup NIP late so we have the best change of getting the
2523 * above info out without failing
2524 */
2525 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2526 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2527 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2528 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2529 #endif
2530 show_stack(current, (unsigned long *) regs->gpr[1]);
2531 if (!user_mode(regs))
2532 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2533 newsp = stack[0];
2534 ip = stack[STACK_FRAME_LR_SAVE];
2535 if (!firstframe || ip != lr) {
2536 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2537 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2538 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2539 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2540 - printk(" (%pS)",
2541 + printk(" (%pA)",
2542 (void *)current->ret_stack[curr_frame].ret);
2543 curr_frame--;
2544 }
2545 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2546 struct pt_regs *regs = (struct pt_regs *)
2547 (sp + STACK_FRAME_OVERHEAD);
2548 lr = regs->link;
2549 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2550 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2551 regs->trap, (void *)regs->nip, (void *)lr);
2552 firstframe = 1;
2553 }
2554 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2555 }
2556
2557 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2558 -
2559 -unsigned long arch_align_stack(unsigned long sp)
2560 -{
2561 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2562 - sp -= get_random_int() & ~PAGE_MASK;
2563 - return sp & ~0xf;
2564 -}
2565 -
2566 -static inline unsigned long brk_rnd(void)
2567 -{
2568 - unsigned long rnd = 0;
2569 -
2570 - /* 8MB for 32bit, 1GB for 64bit */
2571 - if (is_32bit_task())
2572 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2573 - else
2574 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2575 -
2576 - return rnd << PAGE_SHIFT;
2577 -}
2578 -
2579 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2580 -{
2581 - unsigned long base = mm->brk;
2582 - unsigned long ret;
2583 -
2584 -#ifdef CONFIG_PPC_STD_MMU_64
2585 - /*
2586 - * If we are using 1TB segments and we are allowed to randomise
2587 - * the heap, we can put it above 1TB so it is backed by a 1TB
2588 - * segment. Otherwise the heap will be in the bottom 1TB
2589 - * which always uses 256MB segments and this may result in a
2590 - * performance penalty.
2591 - */
2592 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2593 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2594 -#endif
2595 -
2596 - ret = PAGE_ALIGN(base + brk_rnd());
2597 -
2598 - if (ret < mm->brk)
2599 - return mm->brk;
2600 -
2601 - return ret;
2602 -}
2603 -
2604 -unsigned long randomize_et_dyn(unsigned long base)
2605 -{
2606 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2607 -
2608 - if (ret < base)
2609 - return base;
2610 -
2611 - return ret;
2612 -}
2613 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/signal_32.c linux-2.6.32.42/arch/powerpc/kernel/signal_32.c
2614 --- linux-2.6.32.42/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2615 +++ linux-2.6.32.42/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2616 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2617 /* Save user registers on the stack */
2618 frame = &rt_sf->uc.uc_mcontext;
2619 addr = frame;
2620 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2621 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2622 if (save_user_regs(regs, frame, 0, 1))
2623 goto badframe;
2624 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2625 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/signal_64.c linux-2.6.32.42/arch/powerpc/kernel/signal_64.c
2626 --- linux-2.6.32.42/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2627 +++ linux-2.6.32.42/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2628 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2629 current->thread.fpscr.val = 0;
2630
2631 /* Set up to return from userspace. */
2632 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2633 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2634 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2635 } else {
2636 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2637 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c
2638 --- linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2639 +++ linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2640 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2641 if (oldlenp) {
2642 if (!error) {
2643 if (get_user(oldlen, oldlenp) ||
2644 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2645 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2646 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2647 error = -EFAULT;
2648 }
2649 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2650 }
2651 return error;
2652 }
2653 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/traps.c linux-2.6.32.42/arch/powerpc/kernel/traps.c
2654 --- linux-2.6.32.42/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2655 +++ linux-2.6.32.42/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2656 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2657 static inline void pmac_backlight_unblank(void) { }
2658 #endif
2659
2660 +extern void gr_handle_kernel_exploit(void);
2661 +
2662 int die(const char *str, struct pt_regs *regs, long err)
2663 {
2664 static struct {
2665 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2666 if (panic_on_oops)
2667 panic("Fatal exception");
2668
2669 + gr_handle_kernel_exploit();
2670 +
2671 oops_exit();
2672 do_exit(err);
2673
2674 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/vdso.c linux-2.6.32.42/arch/powerpc/kernel/vdso.c
2675 --- linux-2.6.32.42/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2676 +++ linux-2.6.32.42/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2677 @@ -36,6 +36,7 @@
2678 #include <asm/firmware.h>
2679 #include <asm/vdso.h>
2680 #include <asm/vdso_datapage.h>
2681 +#include <asm/mman.h>
2682
2683 #include "setup.h"
2684
2685 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2686 vdso_base = VDSO32_MBASE;
2687 #endif
2688
2689 - current->mm->context.vdso_base = 0;
2690 + current->mm->context.vdso_base = ~0UL;
2691
2692 /* vDSO has a problem and was disabled, just don't "enable" it for the
2693 * process
2694 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2695 vdso_base = get_unmapped_area(NULL, vdso_base,
2696 (vdso_pages << PAGE_SHIFT) +
2697 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2698 - 0, 0);
2699 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2700 if (IS_ERR_VALUE(vdso_base)) {
2701 rc = vdso_base;
2702 goto fail_mmapsem;
2703 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/vio.c linux-2.6.32.42/arch/powerpc/kernel/vio.c
2704 --- linux-2.6.32.42/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2705 +++ linux-2.6.32.42/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2706 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2707 vio_cmo_dealloc(viodev, alloc_size);
2708 }
2709
2710 -struct dma_map_ops vio_dma_mapping_ops = {
2711 +static const struct dma_map_ops vio_dma_mapping_ops = {
2712 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2713 .free_coherent = vio_dma_iommu_free_coherent,
2714 .map_sg = vio_dma_iommu_map_sg,
2715 .unmap_sg = vio_dma_iommu_unmap_sg,
2716 + .dma_supported = dma_iommu_dma_supported,
2717 .map_page = vio_dma_iommu_map_page,
2718 .unmap_page = vio_dma_iommu_unmap_page,
2719
2720 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2721
2722 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2723 {
2724 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2725 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2726 }
2727
2728 diff -urNp linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c
2729 --- linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2730 +++ linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2731 @@ -9,22 +9,6 @@
2732 #include <linux/module.h>
2733 #include <asm/uaccess.h>
2734
2735 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2736 -{
2737 - if (likely(access_ok(VERIFY_READ, from, n)))
2738 - n = __copy_from_user(to, from, n);
2739 - else
2740 - memset(to, 0, n);
2741 - return n;
2742 -}
2743 -
2744 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2745 -{
2746 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2747 - n = __copy_to_user(to, from, n);
2748 - return n;
2749 -}
2750 -
2751 unsigned long copy_in_user(void __user *to, const void __user *from,
2752 unsigned long n)
2753 {
2754 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2755 return n;
2756 }
2757
2758 -EXPORT_SYMBOL(copy_from_user);
2759 -EXPORT_SYMBOL(copy_to_user);
2760 EXPORT_SYMBOL(copy_in_user);
2761
2762 diff -urNp linux-2.6.32.42/arch/powerpc/mm/fault.c linux-2.6.32.42/arch/powerpc/mm/fault.c
2763 --- linux-2.6.32.42/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2764 +++ linux-2.6.32.42/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2765 @@ -30,6 +30,10 @@
2766 #include <linux/kprobes.h>
2767 #include <linux/kdebug.h>
2768 #include <linux/perf_event.h>
2769 +#include <linux/slab.h>
2770 +#include <linux/pagemap.h>
2771 +#include <linux/compiler.h>
2772 +#include <linux/unistd.h>
2773
2774 #include <asm/firmware.h>
2775 #include <asm/page.h>
2776 @@ -40,6 +44,7 @@
2777 #include <asm/uaccess.h>
2778 #include <asm/tlbflush.h>
2779 #include <asm/siginfo.h>
2780 +#include <asm/ptrace.h>
2781
2782
2783 #ifdef CONFIG_KPROBES
2784 @@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2785 }
2786 #endif
2787
2788 +#ifdef CONFIG_PAX_PAGEEXEC
2789 +/*
2790 + * PaX: decide what to do with offenders (regs->nip = fault address)
2791 + *
2792 + * returns 1 when task should be killed
2793 + */
2794 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2795 +{
2796 + return 1;
2797 +}
2798 +
2799 +void pax_report_insns(void *pc, void *sp)
2800 +{
2801 + unsigned long i;
2802 +
2803 + printk(KERN_ERR "PAX: bytes at PC: ");
2804 + for (i = 0; i < 5; i++) {
2805 + unsigned int c;
2806 + if (get_user(c, (unsigned int __user *)pc+i))
2807 + printk(KERN_CONT "???????? ");
2808 + else
2809 + printk(KERN_CONT "%08x ", c);
2810 + }
2811 + printk("\n");
2812 +}
2813 +#endif
2814 +
2815 /*
2816 * Check whether the instruction at regs->nip is a store using
2817 * an update addressing form which will update r1.
2818 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2819 * indicate errors in DSISR but can validly be set in SRR1.
2820 */
2821 if (trap == 0x400)
2822 - error_code &= 0x48200000;
2823 + error_code &= 0x58200000;
2824 else
2825 is_write = error_code & DSISR_ISSTORE;
2826 #else
2827 @@ -250,7 +282,7 @@ good_area:
2828 * "undefined". Of those that can be set, this is the only
2829 * one which seems bad.
2830 */
2831 - if (error_code & 0x10000000)
2832 + if (error_code & DSISR_GUARDED)
2833 /* Guarded storage error. */
2834 goto bad_area;
2835 #endif /* CONFIG_8xx */
2836 @@ -265,7 +297,7 @@ good_area:
2837 * processors use the same I/D cache coherency mechanism
2838 * as embedded.
2839 */
2840 - if (error_code & DSISR_PROTFAULT)
2841 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2842 goto bad_area;
2843 #endif /* CONFIG_PPC_STD_MMU */
2844
2845 @@ -335,6 +367,23 @@ bad_area:
2846 bad_area_nosemaphore:
2847 /* User mode accesses cause a SIGSEGV */
2848 if (user_mode(regs)) {
2849 +
2850 +#ifdef CONFIG_PAX_PAGEEXEC
2851 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2852 +#ifdef CONFIG_PPC_STD_MMU
2853 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2854 +#else
2855 + if (is_exec && regs->nip == address) {
2856 +#endif
2857 + switch (pax_handle_fetch_fault(regs)) {
2858 + }
2859 +
2860 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2861 + do_group_exit(SIGKILL);
2862 + }
2863 + }
2864 +#endif
2865 +
2866 _exception(SIGSEGV, regs, code, address);
2867 return 0;
2868 }
2869 diff -urNp linux-2.6.32.42/arch/powerpc/mm/mmap_64.c linux-2.6.32.42/arch/powerpc/mm/mmap_64.c
2870 --- linux-2.6.32.42/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
2871 +++ linux-2.6.32.42/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
2872 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2873 */
2874 if (mmap_is_legacy()) {
2875 mm->mmap_base = TASK_UNMAPPED_BASE;
2876 +
2877 +#ifdef CONFIG_PAX_RANDMMAP
2878 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2879 + mm->mmap_base += mm->delta_mmap;
2880 +#endif
2881 +
2882 mm->get_unmapped_area = arch_get_unmapped_area;
2883 mm->unmap_area = arch_unmap_area;
2884 } else {
2885 mm->mmap_base = mmap_base();
2886 +
2887 +#ifdef CONFIG_PAX_RANDMMAP
2888 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2889 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2890 +#endif
2891 +
2892 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2893 mm->unmap_area = arch_unmap_area_topdown;
2894 }
2895 diff -urNp linux-2.6.32.42/arch/powerpc/mm/slice.c linux-2.6.32.42/arch/powerpc/mm/slice.c
2896 --- linux-2.6.32.42/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
2897 +++ linux-2.6.32.42/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
2898 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2899 if ((mm->task_size - len) < addr)
2900 return 0;
2901 vma = find_vma(mm, addr);
2902 - return (!vma || (addr + len) <= vma->vm_start);
2903 + return check_heap_stack_gap(vma, addr, len);
2904 }
2905
2906 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2907 @@ -256,7 +256,7 @@ full_search:
2908 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2909 continue;
2910 }
2911 - if (!vma || addr + len <= vma->vm_start) {
2912 + if (check_heap_stack_gap(vma, addr, len)) {
2913 /*
2914 * Remember the place where we stopped the search:
2915 */
2916 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2917 }
2918 }
2919
2920 - addr = mm->mmap_base;
2921 - while (addr > len) {
2922 + if (mm->mmap_base < len)
2923 + addr = -ENOMEM;
2924 + else
2925 + addr = mm->mmap_base - len;
2926 +
2927 + while (!IS_ERR_VALUE(addr)) {
2928 /* Go down by chunk size */
2929 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2930 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2931
2932 /* Check for hit with different page size */
2933 mask = slice_range_to_mask(addr, len);
2934 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2935 * return with success:
2936 */
2937 vma = find_vma(mm, addr);
2938 - if (!vma || (addr + len) <= vma->vm_start) {
2939 + if (check_heap_stack_gap(vma, addr, len)) {
2940 /* remember the address as a hint for next time */
2941 if (use_cache)
2942 mm->free_area_cache = addr;
2943 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2944 mm->cached_hole_size = vma->vm_start - addr;
2945
2946 /* try just below the current vma->vm_start */
2947 - addr = vma->vm_start;
2948 + addr = skip_heap_stack_gap(vma, len);
2949 }
2950
2951 /*
2952 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2953 if (fixed && addr > (mm->task_size - len))
2954 return -EINVAL;
2955
2956 +#ifdef CONFIG_PAX_RANDMMAP
2957 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2958 + addr = 0;
2959 +#endif
2960 +
2961 /* If hint, make sure it matches our alignment restrictions */
2962 if (!fixed && addr) {
2963 addr = _ALIGN_UP(addr, 1ul << pshift);
2964 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c
2965 --- linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
2966 +++ linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
2967 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
2968 lite5200_pm_target_state = PM_SUSPEND_ON;
2969 }
2970
2971 -static struct platform_suspend_ops lite5200_pm_ops = {
2972 +static const struct platform_suspend_ops lite5200_pm_ops = {
2973 .valid = lite5200_pm_valid,
2974 .begin = lite5200_pm_begin,
2975 .prepare = lite5200_pm_prepare,
2976 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c
2977 --- linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
2978 +++ linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
2979 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
2980 iounmap(mbar);
2981 }
2982
2983 -static struct platform_suspend_ops mpc52xx_pm_ops = {
2984 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
2985 .valid = mpc52xx_pm_valid,
2986 .prepare = mpc52xx_pm_prepare,
2987 .enter = mpc52xx_pm_enter,
2988 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c
2989 --- linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
2990 +++ linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
2991 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
2992 return ret;
2993 }
2994
2995 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
2996 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
2997 .valid = mpc83xx_suspend_valid,
2998 .begin = mpc83xx_suspend_begin,
2999 .enter = mpc83xx_suspend_enter,
3000 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c
3001 --- linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3002 +++ linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3003 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3004
3005 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3006
3007 -struct dma_map_ops dma_iommu_fixed_ops = {
3008 +const struct dma_map_ops dma_iommu_fixed_ops = {
3009 .alloc_coherent = dma_fixed_alloc_coherent,
3010 .free_coherent = dma_fixed_free_coherent,
3011 .map_sg = dma_fixed_map_sg,
3012 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c
3013 --- linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3014 +++ linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3015 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3016 return mask >= DMA_BIT_MASK(32);
3017 }
3018
3019 -static struct dma_map_ops ps3_sb_dma_ops = {
3020 +static const struct dma_map_ops ps3_sb_dma_ops = {
3021 .alloc_coherent = ps3_alloc_coherent,
3022 .free_coherent = ps3_free_coherent,
3023 .map_sg = ps3_sb_map_sg,
3024 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3025 .unmap_page = ps3_unmap_page,
3026 };
3027
3028 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3029 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3030 .alloc_coherent = ps3_alloc_coherent,
3031 .free_coherent = ps3_free_coherent,
3032 .map_sg = ps3_ioc0_map_sg,
3033 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig
3034 --- linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3035 +++ linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3036 @@ -2,6 +2,8 @@ config PPC_PSERIES
3037 depends on PPC64 && PPC_BOOK3S
3038 bool "IBM pSeries & new (POWER5-based) iSeries"
3039 select MPIC
3040 + select PCI_MSI
3041 + select XICS
3042 select PPC_I8259
3043 select PPC_RTAS
3044 select RTAS_ERROR_LOGGING
3045 diff -urNp linux-2.6.32.42/arch/s390/include/asm/elf.h linux-2.6.32.42/arch/s390/include/asm/elf.h
3046 --- linux-2.6.32.42/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3047 +++ linux-2.6.32.42/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3048 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3049 that it will "exec", and that there is sufficient room for the brk. */
3050 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3051
3052 +#ifdef CONFIG_PAX_ASLR
3053 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3054 +
3055 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3056 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3057 +#endif
3058 +
3059 /* This yields a mask that user programs can use to figure out what
3060 instruction set this CPU supports. */
3061
3062 diff -urNp linux-2.6.32.42/arch/s390/include/asm/setup.h linux-2.6.32.42/arch/s390/include/asm/setup.h
3063 --- linux-2.6.32.42/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3064 +++ linux-2.6.32.42/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3065 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
3066 void detect_memory_layout(struct mem_chunk chunk[]);
3067
3068 #ifdef CONFIG_S390_SWITCH_AMODE
3069 -extern unsigned int switch_amode;
3070 +#define switch_amode (1)
3071 #else
3072 #define switch_amode (0)
3073 #endif
3074
3075 #ifdef CONFIG_S390_EXEC_PROTECT
3076 -extern unsigned int s390_noexec;
3077 +#define s390_noexec (1)
3078 #else
3079 #define s390_noexec (0)
3080 #endif
3081 diff -urNp linux-2.6.32.42/arch/s390/include/asm/uaccess.h linux-2.6.32.42/arch/s390/include/asm/uaccess.h
3082 --- linux-2.6.32.42/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3083 +++ linux-2.6.32.42/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3084 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
3085 copy_to_user(void __user *to, const void *from, unsigned long n)
3086 {
3087 might_fault();
3088 +
3089 + if ((long)n < 0)
3090 + return n;
3091 +
3092 if (access_ok(VERIFY_WRITE, to, n))
3093 n = __copy_to_user(to, from, n);
3094 return n;
3095 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3096 static inline unsigned long __must_check
3097 __copy_from_user(void *to, const void __user *from, unsigned long n)
3098 {
3099 + if ((long)n < 0)
3100 + return n;
3101 +
3102 if (__builtin_constant_p(n) && (n <= 256))
3103 return uaccess.copy_from_user_small(n, from, to);
3104 else
3105 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
3106 copy_from_user(void *to, const void __user *from, unsigned long n)
3107 {
3108 might_fault();
3109 +
3110 + if ((long)n < 0)
3111 + return n;
3112 +
3113 if (access_ok(VERIFY_READ, from, n))
3114 n = __copy_from_user(to, from, n);
3115 else
3116 diff -urNp linux-2.6.32.42/arch/s390/Kconfig linux-2.6.32.42/arch/s390/Kconfig
3117 --- linux-2.6.32.42/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3118 +++ linux-2.6.32.42/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3119 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3120
3121 config S390_SWITCH_AMODE
3122 bool "Switch kernel/user addressing modes"
3123 + default y
3124 help
3125 This option allows to switch the addressing modes of kernel and user
3126 - space. The kernel parameter switch_amode=on will enable this feature,
3127 - default is disabled. Enabling this (via kernel parameter) on machines
3128 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3129 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3130 + will reduce system performance.
3131
3132 Note that this option will also be selected by selecting the execute
3133 - protection option below. Enabling the execute protection via the
3134 - noexec kernel parameter will also switch the addressing modes,
3135 - independent of the switch_amode kernel parameter.
3136 + protection option below. Enabling the execute protection will also
3137 + switch the addressing modes, independent of this option.
3138
3139
3140 config S390_EXEC_PROTECT
3141 bool "Data execute protection"
3142 + default y
3143 select S390_SWITCH_AMODE
3144 help
3145 This option allows to enable a buffer overflow protection for user
3146 space programs and it also selects the addressing mode option above.
3147 - The kernel parameter noexec=on will enable this feature and also
3148 - switch the addressing modes, default is disabled. Enabling this (via
3149 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3150 - will reduce system performance.
3151 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3152 + reduce system performance.
3153
3154 comment "Code generation options"
3155
3156 diff -urNp linux-2.6.32.42/arch/s390/kernel/module.c linux-2.6.32.42/arch/s390/kernel/module.c
3157 --- linux-2.6.32.42/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3158 +++ linux-2.6.32.42/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3159 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3160
3161 /* Increase core size by size of got & plt and set start
3162 offsets for got and plt. */
3163 - me->core_size = ALIGN(me->core_size, 4);
3164 - me->arch.got_offset = me->core_size;
3165 - me->core_size += me->arch.got_size;
3166 - me->arch.plt_offset = me->core_size;
3167 - me->core_size += me->arch.plt_size;
3168 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3169 + me->arch.got_offset = me->core_size_rw;
3170 + me->core_size_rw += me->arch.got_size;
3171 + me->arch.plt_offset = me->core_size_rx;
3172 + me->core_size_rx += me->arch.plt_size;
3173 return 0;
3174 }
3175
3176 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3177 if (info->got_initialized == 0) {
3178 Elf_Addr *gotent;
3179
3180 - gotent = me->module_core + me->arch.got_offset +
3181 + gotent = me->module_core_rw + me->arch.got_offset +
3182 info->got_offset;
3183 *gotent = val;
3184 info->got_initialized = 1;
3185 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3186 else if (r_type == R_390_GOTENT ||
3187 r_type == R_390_GOTPLTENT)
3188 *(unsigned int *) loc =
3189 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3190 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3191 else if (r_type == R_390_GOT64 ||
3192 r_type == R_390_GOTPLT64)
3193 *(unsigned long *) loc = val;
3194 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3195 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3196 if (info->plt_initialized == 0) {
3197 unsigned int *ip;
3198 - ip = me->module_core + me->arch.plt_offset +
3199 + ip = me->module_core_rx + me->arch.plt_offset +
3200 info->plt_offset;
3201 #ifndef CONFIG_64BIT
3202 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3203 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3204 val - loc + 0xffffUL < 0x1ffffeUL) ||
3205 (r_type == R_390_PLT32DBL &&
3206 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3207 - val = (Elf_Addr) me->module_core +
3208 + val = (Elf_Addr) me->module_core_rx +
3209 me->arch.plt_offset +
3210 info->plt_offset;
3211 val += rela->r_addend - loc;
3212 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3213 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3214 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3215 val = val + rela->r_addend -
3216 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3217 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3218 if (r_type == R_390_GOTOFF16)
3219 *(unsigned short *) loc = val;
3220 else if (r_type == R_390_GOTOFF32)
3221 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3222 break;
3223 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3224 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3225 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3226 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3227 rela->r_addend - loc;
3228 if (r_type == R_390_GOTPC)
3229 *(unsigned int *) loc = val;
3230 diff -urNp linux-2.6.32.42/arch/s390/kernel/setup.c linux-2.6.32.42/arch/s390/kernel/setup.c
3231 --- linux-2.6.32.42/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3232 +++ linux-2.6.32.42/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3233 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3234 early_param("mem", early_parse_mem);
3235
3236 #ifdef CONFIG_S390_SWITCH_AMODE
3237 -unsigned int switch_amode = 0;
3238 -EXPORT_SYMBOL_GPL(switch_amode);
3239 -
3240 static int set_amode_and_uaccess(unsigned long user_amode,
3241 unsigned long user32_amode)
3242 {
3243 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3244 return 0;
3245 }
3246 }
3247 -
3248 -/*
3249 - * Switch kernel/user addressing modes?
3250 - */
3251 -static int __init early_parse_switch_amode(char *p)
3252 -{
3253 - switch_amode = 1;
3254 - return 0;
3255 -}
3256 -early_param("switch_amode", early_parse_switch_amode);
3257 -
3258 #else /* CONFIG_S390_SWITCH_AMODE */
3259 static inline int set_amode_and_uaccess(unsigned long user_amode,
3260 unsigned long user32_amode)
3261 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3262 }
3263 #endif /* CONFIG_S390_SWITCH_AMODE */
3264
3265 -#ifdef CONFIG_S390_EXEC_PROTECT
3266 -unsigned int s390_noexec = 0;
3267 -EXPORT_SYMBOL_GPL(s390_noexec);
3268 -
3269 -/*
3270 - * Enable execute protection?
3271 - */
3272 -static int __init early_parse_noexec(char *p)
3273 -{
3274 - if (!strncmp(p, "off", 3))
3275 - return 0;
3276 - switch_amode = 1;
3277 - s390_noexec = 1;
3278 - return 0;
3279 -}
3280 -early_param("noexec", early_parse_noexec);
3281 -#endif /* CONFIG_S390_EXEC_PROTECT */
3282 -
3283 static void setup_addressing_mode(void)
3284 {
3285 if (s390_noexec) {
3286 diff -urNp linux-2.6.32.42/arch/s390/mm/mmap.c linux-2.6.32.42/arch/s390/mm/mmap.c
3287 --- linux-2.6.32.42/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3288 +++ linux-2.6.32.42/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3289 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3290 */
3291 if (mmap_is_legacy()) {
3292 mm->mmap_base = TASK_UNMAPPED_BASE;
3293 +
3294 +#ifdef CONFIG_PAX_RANDMMAP
3295 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3296 + mm->mmap_base += mm->delta_mmap;
3297 +#endif
3298 +
3299 mm->get_unmapped_area = arch_get_unmapped_area;
3300 mm->unmap_area = arch_unmap_area;
3301 } else {
3302 mm->mmap_base = mmap_base();
3303 +
3304 +#ifdef CONFIG_PAX_RANDMMAP
3305 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3306 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3307 +#endif
3308 +
3309 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3310 mm->unmap_area = arch_unmap_area_topdown;
3311 }
3312 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3313 */
3314 if (mmap_is_legacy()) {
3315 mm->mmap_base = TASK_UNMAPPED_BASE;
3316 +
3317 +#ifdef CONFIG_PAX_RANDMMAP
3318 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3319 + mm->mmap_base += mm->delta_mmap;
3320 +#endif
3321 +
3322 mm->get_unmapped_area = s390_get_unmapped_area;
3323 mm->unmap_area = arch_unmap_area;
3324 } else {
3325 mm->mmap_base = mmap_base();
3326 +
3327 +#ifdef CONFIG_PAX_RANDMMAP
3328 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3329 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3330 +#endif
3331 +
3332 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3333 mm->unmap_area = arch_unmap_area_topdown;
3334 }
3335 diff -urNp linux-2.6.32.42/arch/score/include/asm/system.h linux-2.6.32.42/arch/score/include/asm/system.h
3336 --- linux-2.6.32.42/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3337 +++ linux-2.6.32.42/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3338 @@ -17,7 +17,7 @@ do { \
3339 #define finish_arch_switch(prev) do {} while (0)
3340
3341 typedef void (*vi_handler_t)(void);
3342 -extern unsigned long arch_align_stack(unsigned long sp);
3343 +#define arch_align_stack(x) (x)
3344
3345 #define mb() barrier()
3346 #define rmb() barrier()
3347 diff -urNp linux-2.6.32.42/arch/score/kernel/process.c linux-2.6.32.42/arch/score/kernel/process.c
3348 --- linux-2.6.32.42/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3349 +++ linux-2.6.32.42/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3350 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3351
3352 return task_pt_regs(task)->cp0_epc;
3353 }
3354 -
3355 -unsigned long arch_align_stack(unsigned long sp)
3356 -{
3357 - return sp;
3358 -}
3359 diff -urNp linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c
3360 --- linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3361 +++ linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3362 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3363 return 0;
3364 }
3365
3366 -static struct platform_suspend_ops hp6x0_pm_ops = {
3367 +static const struct platform_suspend_ops hp6x0_pm_ops = {
3368 .enter = hp6x0_pm_enter,
3369 .valid = suspend_valid_only_mem,
3370 };
3371 diff -urNp linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c
3372 --- linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3373 +++ linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3374 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3375 NULL,
3376 };
3377
3378 -static struct sysfs_ops sq_sysfs_ops = {
3379 +static const struct sysfs_ops sq_sysfs_ops = {
3380 .show = sq_sysfs_show,
3381 .store = sq_sysfs_store,
3382 };
3383 diff -urNp linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c
3384 --- linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3385 +++ linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3386 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3387 return 0;
3388 }
3389
3390 -static struct platform_suspend_ops sh_pm_ops = {
3391 +static const struct platform_suspend_ops sh_pm_ops = {
3392 .enter = sh_pm_enter,
3393 .valid = suspend_valid_only_mem,
3394 };
3395 diff -urNp linux-2.6.32.42/arch/sh/kernel/kgdb.c linux-2.6.32.42/arch/sh/kernel/kgdb.c
3396 --- linux-2.6.32.42/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3397 +++ linux-2.6.32.42/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3398 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3399 {
3400 }
3401
3402 -struct kgdb_arch arch_kgdb_ops = {
3403 +const struct kgdb_arch arch_kgdb_ops = {
3404 /* Breakpoint instruction: trapa #0x3c */
3405 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3406 .gdb_bpt_instr = { 0x3c, 0xc3 },
3407 diff -urNp linux-2.6.32.42/arch/sh/mm/mmap.c linux-2.6.32.42/arch/sh/mm/mmap.c
3408 --- linux-2.6.32.42/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3409 +++ linux-2.6.32.42/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3410 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3411 addr = PAGE_ALIGN(addr);
3412
3413 vma = find_vma(mm, addr);
3414 - if (TASK_SIZE - len >= addr &&
3415 - (!vma || addr + len <= vma->vm_start))
3416 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3417 return addr;
3418 }
3419
3420 @@ -106,7 +105,7 @@ full_search:
3421 }
3422 return -ENOMEM;
3423 }
3424 - if (likely(!vma || addr + len <= vma->vm_start)) {
3425 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3426 /*
3427 * Remember the place where we stopped the search:
3428 */
3429 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3430 addr = PAGE_ALIGN(addr);
3431
3432 vma = find_vma(mm, addr);
3433 - if (TASK_SIZE - len >= addr &&
3434 - (!vma || addr + len <= vma->vm_start))
3435 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3436 return addr;
3437 }
3438
3439 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3440 /* make sure it can fit in the remaining address space */
3441 if (likely(addr > len)) {
3442 vma = find_vma(mm, addr-len);
3443 - if (!vma || addr <= vma->vm_start) {
3444 + if (check_heap_stack_gap(vma, addr - len, len)) {
3445 /* remember the address as a hint for next time */
3446 return (mm->free_area_cache = addr-len);
3447 }
3448 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3449 if (unlikely(mm->mmap_base < len))
3450 goto bottomup;
3451
3452 - addr = mm->mmap_base-len;
3453 - if (do_colour_align)
3454 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3455 + addr = mm->mmap_base - len;
3456
3457 do {
3458 + if (do_colour_align)
3459 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3460 /*
3461 * Lookup failure means no vma is above this address,
3462 * else if new region fits below vma->vm_start,
3463 * return with success:
3464 */
3465 vma = find_vma(mm, addr);
3466 - if (likely(!vma || addr+len <= vma->vm_start)) {
3467 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3468 /* remember the address as a hint for next time */
3469 return (mm->free_area_cache = addr);
3470 }
3471 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3472 mm->cached_hole_size = vma->vm_start - addr;
3473
3474 /* try just below the current vma->vm_start */
3475 - addr = vma->vm_start-len;
3476 - if (do_colour_align)
3477 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3478 - } while (likely(len < vma->vm_start));
3479 + addr = skip_heap_stack_gap(vma, len);
3480 + } while (!IS_ERR_VALUE(addr));
3481
3482 bottomup:
3483 /*
3484 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h
3485 --- linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3486 +++ linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h 2011-05-04 17:56:20.000000000 -0400
3487 @@ -14,18 +14,40 @@
3488 #define ATOMIC64_INIT(i) { (i) }
3489
3490 #define atomic_read(v) ((v)->counter)
3491 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3492 +{
3493 + return v->counter;
3494 +}
3495 #define atomic64_read(v) ((v)->counter)
3496 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3497 +{
3498 + return v->counter;
3499 +}
3500
3501 #define atomic_set(v, i) (((v)->counter) = i)
3502 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3503 +{
3504 + v->counter = i;
3505 +}
3506 #define atomic64_set(v, i) (((v)->counter) = i)
3507 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3508 +{
3509 + v->counter = i;
3510 +}
3511
3512 extern void atomic_add(int, atomic_t *);
3513 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3514 extern void atomic64_add(long, atomic64_t *);
3515 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3516 extern void atomic_sub(int, atomic_t *);
3517 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3518 extern void atomic64_sub(long, atomic64_t *);
3519 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3520
3521 extern int atomic_add_ret(int, atomic_t *);
3522 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3523 extern long atomic64_add_ret(long, atomic64_t *);
3524 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3525 extern int atomic_sub_ret(int, atomic_t *);
3526 extern long atomic64_sub_ret(long, atomic64_t *);
3527
3528 @@ -33,7 +55,15 @@ extern long atomic64_sub_ret(long, atomi
3529 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3530
3531 #define atomic_inc_return(v) atomic_add_ret(1, v)
3532 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3533 +{
3534 + return atomic_add_ret_unchecked(1, v);
3535 +}
3536 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3537 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3538 +{
3539 + return atomic64_add_ret_unchecked(1, v);
3540 +}
3541
3542 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3543 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3544 @@ -50,6 +80,7 @@ extern long atomic64_sub_ret(long, atomi
3545 * other cases.
3546 */
3547 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3548 +#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
3549 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3550
3551 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3552 @@ -59,30 +90,59 @@ extern long atomic64_sub_ret(long, atomi
3553 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3554
3555 #define atomic_inc(v) atomic_add(1, v)
3556 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3557 +{
3558 + atomic_add_unchecked(1, v);
3559 +}
3560 #define atomic64_inc(v) atomic64_add(1, v)
3561 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3562 +{
3563 + atomic64_add_unchecked(1, v);
3564 +}
3565
3566 #define atomic_dec(v) atomic_sub(1, v)
3567 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3568 +{
3569 + atomic_sub_unchecked(1, v);
3570 +}
3571 #define atomic64_dec(v) atomic64_sub(1, v)
3572 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3573 +{
3574 + atomic64_sub_unchecked(1, v);
3575 +}
3576
3577 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3578 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3579
3580 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3581 +#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3582 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3583 +#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
3584
3585 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3586 {
3587 - int c, old;
3588 + int c, old, new;
3589 c = atomic_read(v);
3590 for (;;) {
3591 - if (unlikely(c == (u)))
3592 + if (unlikely(c == u))
3593 break;
3594 - old = atomic_cmpxchg((v), c, c + (a));
3595 +
3596 + asm volatile("addcc %2, %0, %0\n"
3597 +
3598 +#ifdef CONFIG_PAX_REFCOUNT
3599 + "tvs %%icc, 6\n"
3600 +#endif
3601 +
3602 + : "=r" (new)
3603 + : "0" (c), "ir" (a)
3604 + : "cc");
3605 +
3606 + old = atomic_cmpxchg(v, c, new);
3607 if (likely(old == c))
3608 break;
3609 c = old;
3610 }
3611 - return c != (u);
3612 + return c != u;
3613 }
3614
3615 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3616 @@ -93,17 +153,28 @@ static inline int atomic_add_unless(atom
3617
3618 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3619 {
3620 - long c, old;
3621 + long c, old, new;
3622 c = atomic64_read(v);
3623 for (;;) {
3624 - if (unlikely(c == (u)))
3625 + if (unlikely(c == u))
3626 break;
3627 - old = atomic64_cmpxchg((v), c, c + (a));
3628 +
3629 + asm volatile("addcc %2, %0, %0\n"
3630 +
3631 +#ifdef CONFIG_PAX_REFCOUNT
3632 + "tvs %%xcc, 6\n"
3633 +#endif
3634 +
3635 + : "=r" (new)
3636 + : "0" (c), "ir" (a)
3637 + : "cc");
3638 +
3639 + old = atomic64_cmpxchg(v, c, new);
3640 if (likely(old == c))
3641 break;
3642 c = old;
3643 }
3644 - return c != (u);
3645 + return c != u;
3646 }
3647
3648 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3649 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/cache.h linux-2.6.32.42/arch/sparc/include/asm/cache.h
3650 --- linux-2.6.32.42/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3651 +++ linux-2.6.32.42/arch/sparc/include/asm/cache.h 2011-05-17 19:26:34.000000000 -0400
3652 @@ -8,7 +8,7 @@
3653 #define _SPARC_CACHE_H
3654
3655 #define L1_CACHE_SHIFT 5
3656 -#define L1_CACHE_BYTES 32
3657 +#define L1_CACHE_BYTES 32U
3658 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3659
3660 #ifdef CONFIG_SPARC32
3661 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h
3662 --- linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3663 +++ linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3664 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3665 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3666 #define dma_is_consistent(d, h) (1)
3667
3668 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3669 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3670 extern struct bus_type pci_bus_type;
3671
3672 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3673 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3674 {
3675 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3676 if (dev->bus == &pci_bus_type)
3677 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3678 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3679 dma_addr_t *dma_handle, gfp_t flag)
3680 {
3681 - struct dma_map_ops *ops = get_dma_ops(dev);
3682 + const struct dma_map_ops *ops = get_dma_ops(dev);
3683 void *cpu_addr;
3684
3685 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3686 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3687 static inline void dma_free_coherent(struct device *dev, size_t size,
3688 void *cpu_addr, dma_addr_t dma_handle)
3689 {
3690 - struct dma_map_ops *ops = get_dma_ops(dev);
3691 + const struct dma_map_ops *ops = get_dma_ops(dev);
3692
3693 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3694 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3695 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/elf_32.h linux-2.6.32.42/arch/sparc/include/asm/elf_32.h
3696 --- linux-2.6.32.42/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3697 +++ linux-2.6.32.42/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3698 @@ -116,6 +116,13 @@ typedef struct {
3699
3700 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3701
3702 +#ifdef CONFIG_PAX_ASLR
3703 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3704 +
3705 +#define PAX_DELTA_MMAP_LEN 16
3706 +#define PAX_DELTA_STACK_LEN 16
3707 +#endif
3708 +
3709 /* This yields a mask that user programs can use to figure out what
3710 instruction set this cpu supports. This can NOT be done in userspace
3711 on Sparc. */
3712 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/elf_64.h linux-2.6.32.42/arch/sparc/include/asm/elf_64.h
3713 --- linux-2.6.32.42/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3714 +++ linux-2.6.32.42/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3715 @@ -163,6 +163,12 @@ typedef struct {
3716 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3717 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3718
3719 +#ifdef CONFIG_PAX_ASLR
3720 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3721 +
3722 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3723 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3724 +#endif
3725
3726 /* This yields a mask that user programs can use to figure out what
3727 instruction set this cpu supports. */
3728 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h
3729 --- linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3730 +++ linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3731 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3732 BTFIXUPDEF_INT(page_none)
3733 BTFIXUPDEF_INT(page_copy)
3734 BTFIXUPDEF_INT(page_readonly)
3735 +
3736 +#ifdef CONFIG_PAX_PAGEEXEC
3737 +BTFIXUPDEF_INT(page_shared_noexec)
3738 +BTFIXUPDEF_INT(page_copy_noexec)
3739 +BTFIXUPDEF_INT(page_readonly_noexec)
3740 +#endif
3741 +
3742 BTFIXUPDEF_INT(page_kernel)
3743
3744 #define PMD_SHIFT SUN4C_PMD_SHIFT
3745 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3746 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3747 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3748
3749 +#ifdef CONFIG_PAX_PAGEEXEC
3750 +extern pgprot_t PAGE_SHARED_NOEXEC;
3751 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3752 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3753 +#else
3754 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3755 +# define PAGE_COPY_NOEXEC PAGE_COPY
3756 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3757 +#endif
3758 +
3759 extern unsigned long page_kernel;
3760
3761 #ifdef MODULE
3762 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h
3763 --- linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
3764 +++ linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
3765 @@ -115,6 +115,13 @@
3766 SRMMU_EXEC | SRMMU_REF)
3767 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3768 SRMMU_EXEC | SRMMU_REF)
3769 +
3770 +#ifdef CONFIG_PAX_PAGEEXEC
3771 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3772 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3773 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3774 +#endif
3775 +
3776 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3777 SRMMU_DIRTY | SRMMU_REF)
3778
3779 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h
3780 --- linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
3781 +++ linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h 2011-05-04 17:56:20.000000000 -0400
3782 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
3783
3784 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3785
3786 -static void inline arch_read_lock(raw_rwlock_t *lock)
3787 +static inline void arch_read_lock(raw_rwlock_t *lock)
3788 {
3789 unsigned long tmp1, tmp2;
3790
3791 __asm__ __volatile__ (
3792 "1: ldsw [%2], %0\n"
3793 " brlz,pn %0, 2f\n"
3794 -"4: add %0, 1, %1\n"
3795 +"4: addcc %0, 1, %1\n"
3796 +
3797 +#ifdef CONFIG_PAX_REFCOUNT
3798 +" tvs %%icc, 6\n"
3799 +#endif
3800 +
3801 " cas [%2], %0, %1\n"
3802 " cmp %0, %1\n"
3803 " bne,pn %%icc, 1b\n"
3804 @@ -112,7 +117,7 @@ static void inline arch_read_lock(raw_rw
3805 " .previous"
3806 : "=&r" (tmp1), "=&r" (tmp2)
3807 : "r" (lock)
3808 - : "memory");
3809 + : "memory", "cc");
3810 }
3811
3812 static int inline arch_read_trylock(raw_rwlock_t *lock)
3813 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
3814 "1: ldsw [%2], %0\n"
3815 " brlz,a,pn %0, 2f\n"
3816 " mov 0, %0\n"
3817 -" add %0, 1, %1\n"
3818 +" addcc %0, 1, %1\n"
3819 +
3820 +#ifdef CONFIG_PAX_REFCOUNT
3821 +" tvs %%icc, 6\n"
3822 +#endif
3823 +
3824 " cas [%2], %0, %1\n"
3825 " cmp %0, %1\n"
3826 " bne,pn %%icc, 1b\n"
3827 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
3828 return tmp1;
3829 }
3830
3831 -static void inline arch_read_unlock(raw_rwlock_t *lock)
3832 +static inline void arch_read_unlock(raw_rwlock_t *lock)
3833 {
3834 unsigned long tmp1, tmp2;
3835
3836 __asm__ __volatile__(
3837 "1: lduw [%2], %0\n"
3838 -" sub %0, 1, %1\n"
3839 +" subcc %0, 1, %1\n"
3840 +
3841 +#ifdef CONFIG_PAX_REFCOUNT
3842 +" tvs %%icc, 6\n"
3843 +#endif
3844 +
3845 " cas [%2], %0, %1\n"
3846 " cmp %0, %1\n"
3847 " bne,pn %%xcc, 1b\n"
3848 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
3849 : "memory");
3850 }
3851
3852 -static void inline arch_write_lock(raw_rwlock_t *lock)
3853 +static inline void arch_write_lock(raw_rwlock_t *lock)
3854 {
3855 unsigned long mask, tmp1, tmp2;
3856
3857 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
3858 : "memory");
3859 }
3860
3861 -static void inline arch_write_unlock(raw_rwlock_t *lock)
3862 +static inline void arch_write_unlock(raw_rwlock_t *lock)
3863 {
3864 __asm__ __volatile__(
3865 " stw %%g0, [%0]"
3866 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h
3867 --- linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
3868 +++ linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
3869 @@ -50,6 +50,8 @@ struct thread_info {
3870 unsigned long w_saved;
3871
3872 struct restart_block restart_block;
3873 +
3874 + unsigned long lowest_stack;
3875 };
3876
3877 /*
3878 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h
3879 --- linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
3880 +++ linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
3881 @@ -68,6 +68,8 @@ struct thread_info {
3882 struct pt_regs *kern_una_regs;
3883 unsigned int kern_una_insn;
3884
3885 + unsigned long lowest_stack;
3886 +
3887 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3888 };
3889
3890 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h
3891 --- linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
3892 +++ linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
3893 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3894
3895 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3896 {
3897 - if (n && __access_ok((unsigned long) to, n))
3898 + if ((long)n < 0)
3899 + return n;
3900 +
3901 + if (n && __access_ok((unsigned long) to, n)) {
3902 + if (!__builtin_constant_p(n))
3903 + check_object_size(from, n, true);
3904 return __copy_user(to, (__force void __user *) from, n);
3905 - else
3906 + } else
3907 return n;
3908 }
3909
3910 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3911 {
3912 + if ((long)n < 0)
3913 + return n;
3914 +
3915 + if (!__builtin_constant_p(n))
3916 + check_object_size(from, n, true);
3917 +
3918 return __copy_user(to, (__force void __user *) from, n);
3919 }
3920
3921 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3922 {
3923 - if (n && __access_ok((unsigned long) from, n))
3924 + if ((long)n < 0)
3925 + return n;
3926 +
3927 + if (n && __access_ok((unsigned long) from, n)) {
3928 + if (!__builtin_constant_p(n))
3929 + check_object_size(to, n, false);
3930 return __copy_user((__force void __user *) to, from, n);
3931 - else
3932 + } else
3933 return n;
3934 }
3935
3936 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3937 {
3938 + if ((long)n < 0)
3939 + return n;
3940 +
3941 return __copy_user((__force void __user *) to, from, n);
3942 }
3943
3944 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h
3945 --- linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
3946 +++ linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
3947 @@ -9,6 +9,7 @@
3948 #include <linux/compiler.h>
3949 #include <linux/string.h>
3950 #include <linux/thread_info.h>
3951 +#include <linux/kernel.h>
3952 #include <asm/asi.h>
3953 #include <asm/system.h>
3954 #include <asm/spitfire.h>
3955 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
3956 static inline unsigned long __must_check
3957 copy_from_user(void *to, const void __user *from, unsigned long size)
3958 {
3959 - unsigned long ret = ___copy_from_user(to, from, size);
3960 + unsigned long ret;
3961
3962 + if ((long)size < 0 || size > INT_MAX)
3963 + return size;
3964 +
3965 + if (!__builtin_constant_p(size))
3966 + check_object_size(to, size, false);
3967 +
3968 + ret = ___copy_from_user(to, from, size);
3969 if (unlikely(ret))
3970 ret = copy_from_user_fixup(to, from, size);
3971 return ret;
3972 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
3973 static inline unsigned long __must_check
3974 copy_to_user(void __user *to, const void *from, unsigned long size)
3975 {
3976 - unsigned long ret = ___copy_to_user(to, from, size);
3977 + unsigned long ret;
3978 +
3979 + if ((long)size < 0 || size > INT_MAX)
3980 + return size;
3981 +
3982 + if (!__builtin_constant_p(size))
3983 + check_object_size(from, size, true);
3984
3985 + ret = ___copy_to_user(to, from, size);
3986 if (unlikely(ret))
3987 ret = copy_to_user_fixup(to, from, size);
3988 return ret;
3989 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/uaccess.h linux-2.6.32.42/arch/sparc/include/asm/uaccess.h
3990 --- linux-2.6.32.42/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3991 +++ linux-2.6.32.42/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
3992 @@ -1,5 +1,13 @@
3993 #ifndef ___ASM_SPARC_UACCESS_H
3994 #define ___ASM_SPARC_UACCESS_H
3995 +
3996 +#ifdef __KERNEL__
3997 +#ifndef __ASSEMBLY__
3998 +#include <linux/types.h>
3999 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4000 +#endif
4001 +#endif
4002 +
4003 #if defined(__sparc__) && defined(__arch64__)
4004 #include <asm/uaccess_64.h>
4005 #else
4006 diff -urNp linux-2.6.32.42/arch/sparc/kernel/iommu.c linux-2.6.32.42/arch/sparc/kernel/iommu.c
4007 --- linux-2.6.32.42/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4008 +++ linux-2.6.32.42/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4009 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4010 spin_unlock_irqrestore(&iommu->lock, flags);
4011 }
4012
4013 -static struct dma_map_ops sun4u_dma_ops = {
4014 +static const struct dma_map_ops sun4u_dma_ops = {
4015 .alloc_coherent = dma_4u_alloc_coherent,
4016 .free_coherent = dma_4u_free_coherent,
4017 .map_page = dma_4u_map_page,
4018 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4019 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4020 };
4021
4022 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4023 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4024 EXPORT_SYMBOL(dma_ops);
4025
4026 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4027 diff -urNp linux-2.6.32.42/arch/sparc/kernel/ioport.c linux-2.6.32.42/arch/sparc/kernel/ioport.c
4028 --- linux-2.6.32.42/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4029 +++ linux-2.6.32.42/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4030 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4031 BUG();
4032 }
4033
4034 -struct dma_map_ops sbus_dma_ops = {
4035 +const struct dma_map_ops sbus_dma_ops = {
4036 .alloc_coherent = sbus_alloc_coherent,
4037 .free_coherent = sbus_free_coherent,
4038 .map_page = sbus_map_page,
4039 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4040 .sync_sg_for_device = sbus_sync_sg_for_device,
4041 };
4042
4043 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
4044 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4045 EXPORT_SYMBOL(dma_ops);
4046
4047 static int __init sparc_register_ioport(void)
4048 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4049 }
4050 }
4051
4052 -struct dma_map_ops pci32_dma_ops = {
4053 +const struct dma_map_ops pci32_dma_ops = {
4054 .alloc_coherent = pci32_alloc_coherent,
4055 .free_coherent = pci32_free_coherent,
4056 .map_page = pci32_map_page,
4057 diff -urNp linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c
4058 --- linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4059 +++ linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4060 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4061 {
4062 }
4063
4064 -struct kgdb_arch arch_kgdb_ops = {
4065 +const struct kgdb_arch arch_kgdb_ops = {
4066 /* Breakpoint instruction: ta 0x7d */
4067 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4068 };
4069 diff -urNp linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c
4070 --- linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4071 +++ linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4072 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4073 {
4074 }
4075
4076 -struct kgdb_arch arch_kgdb_ops = {
4077 +const struct kgdb_arch arch_kgdb_ops = {
4078 /* Breakpoint instruction: ta 0x72 */
4079 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4080 };
4081 diff -urNp linux-2.6.32.42/arch/sparc/kernel/Makefile linux-2.6.32.42/arch/sparc/kernel/Makefile
4082 --- linux-2.6.32.42/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4083 +++ linux-2.6.32.42/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4084 @@ -3,7 +3,7 @@
4085 #
4086
4087 asflags-y := -ansi
4088 -ccflags-y := -Werror
4089 +#ccflags-y := -Werror
4090
4091 extra-y := head_$(BITS).o
4092 extra-y += init_task.o
4093 diff -urNp linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c
4094 --- linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4095 +++ linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4096 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4097 spin_unlock_irqrestore(&iommu->lock, flags);
4098 }
4099
4100 -static struct dma_map_ops sun4v_dma_ops = {
4101 +static const struct dma_map_ops sun4v_dma_ops = {
4102 .alloc_coherent = dma_4v_alloc_coherent,
4103 .free_coherent = dma_4v_free_coherent,
4104 .map_page = dma_4v_map_page,
4105 diff -urNp linux-2.6.32.42/arch/sparc/kernel/process_32.c linux-2.6.32.42/arch/sparc/kernel/process_32.c
4106 --- linux-2.6.32.42/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4107 +++ linux-2.6.32.42/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4108 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4109 rw->ins[4], rw->ins[5],
4110 rw->ins[6],
4111 rw->ins[7]);
4112 - printk("%pS\n", (void *) rw->ins[7]);
4113 + printk("%pA\n", (void *) rw->ins[7]);
4114 rw = (struct reg_window32 *) rw->ins[6];
4115 }
4116 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4117 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4118
4119 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4120 r->psr, r->pc, r->npc, r->y, print_tainted());
4121 - printk("PC: <%pS>\n", (void *) r->pc);
4122 + printk("PC: <%pA>\n", (void *) r->pc);
4123 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4124 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4125 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4126 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4127 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4128 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4129 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4130 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4131
4132 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4133 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4134 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4135 rw = (struct reg_window32 *) fp;
4136 pc = rw->ins[7];
4137 printk("[%08lx : ", pc);
4138 - printk("%pS ] ", (void *) pc);
4139 + printk("%pA ] ", (void *) pc);
4140 fp = rw->ins[6];
4141 } while (++count < 16);
4142 printk("\n");
4143 diff -urNp linux-2.6.32.42/arch/sparc/kernel/process_64.c linux-2.6.32.42/arch/sparc/kernel/process_64.c
4144 --- linux-2.6.32.42/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4145 +++ linux-2.6.32.42/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4146 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4147 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4148 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4149 if (regs->tstate & TSTATE_PRIV)
4150 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4151 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4152 }
4153
4154 void show_regs(struct pt_regs *regs)
4155 {
4156 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4157 regs->tpc, regs->tnpc, regs->y, print_tainted());
4158 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4159 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4160 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4161 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4162 regs->u_regs[3]);
4163 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4164 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4165 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4166 regs->u_regs[15]);
4167 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4168 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4169 show_regwindow(regs);
4170 }
4171
4172 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4173 ((tp && tp->task) ? tp->task->pid : -1));
4174
4175 if (gp->tstate & TSTATE_PRIV) {
4176 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4177 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4178 (void *) gp->tpc,
4179 (void *) gp->o7,
4180 (void *) gp->i7,
4181 diff -urNp linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c
4182 --- linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4183 +++ linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4184 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4185 if (ARCH_SUN4C && len > 0x20000000)
4186 return -ENOMEM;
4187 if (!addr)
4188 - addr = TASK_UNMAPPED_BASE;
4189 + addr = current->mm->mmap_base;
4190
4191 if (flags & MAP_SHARED)
4192 addr = COLOUR_ALIGN(addr);
4193 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4194 }
4195 if (TASK_SIZE - PAGE_SIZE - len < addr)
4196 return -ENOMEM;
4197 - if (!vmm || addr + len <= vmm->vm_start)
4198 + if (check_heap_stack_gap(vmm, addr, len))
4199 return addr;
4200 addr = vmm->vm_end;
4201 if (flags & MAP_SHARED)
4202 diff -urNp linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c
4203 --- linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4204 +++ linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4205 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4206 /* We do not accept a shared mapping if it would violate
4207 * cache aliasing constraints.
4208 */
4209 - if ((flags & MAP_SHARED) &&
4210 + if ((filp || (flags & MAP_SHARED)) &&
4211 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4212 return -EINVAL;
4213 return addr;
4214 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4215 if (filp || (flags & MAP_SHARED))
4216 do_color_align = 1;
4217
4218 +#ifdef CONFIG_PAX_RANDMMAP
4219 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4220 +#endif
4221 +
4222 if (addr) {
4223 if (do_color_align)
4224 addr = COLOUR_ALIGN(addr, pgoff);
4225 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4226 addr = PAGE_ALIGN(addr);
4227
4228 vma = find_vma(mm, addr);
4229 - if (task_size - len >= addr &&
4230 - (!vma || addr + len <= vma->vm_start))
4231 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4232 return addr;
4233 }
4234
4235 if (len > mm->cached_hole_size) {
4236 - start_addr = addr = mm->free_area_cache;
4237 + start_addr = addr = mm->free_area_cache;
4238 } else {
4239 - start_addr = addr = TASK_UNMAPPED_BASE;
4240 + start_addr = addr = mm->mmap_base;
4241 mm->cached_hole_size = 0;
4242 }
4243
4244 @@ -175,14 +178,14 @@ full_search:
4245 vma = find_vma(mm, VA_EXCLUDE_END);
4246 }
4247 if (unlikely(task_size < addr)) {
4248 - if (start_addr != TASK_UNMAPPED_BASE) {
4249 - start_addr = addr = TASK_UNMAPPED_BASE;
4250 + if (start_addr != mm->mmap_base) {
4251 + start_addr = addr = mm->mmap_base;
4252 mm->cached_hole_size = 0;
4253 goto full_search;
4254 }
4255 return -ENOMEM;
4256 }
4257 - if (likely(!vma || addr + len <= vma->vm_start)) {
4258 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4259 /*
4260 * Remember the place where we stopped the search:
4261 */
4262 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4263 /* We do not accept a shared mapping if it would violate
4264 * cache aliasing constraints.
4265 */
4266 - if ((flags & MAP_SHARED) &&
4267 + if ((filp || (flags & MAP_SHARED)) &&
4268 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4269 return -EINVAL;
4270 return addr;
4271 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4272 addr = PAGE_ALIGN(addr);
4273
4274 vma = find_vma(mm, addr);
4275 - if (task_size - len >= addr &&
4276 - (!vma || addr + len <= vma->vm_start))
4277 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4278 return addr;
4279 }
4280
4281 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4282 /* make sure it can fit in the remaining address space */
4283 if (likely(addr > len)) {
4284 vma = find_vma(mm, addr-len);
4285 - if (!vma || addr <= vma->vm_start) {
4286 + if (check_heap_stack_gap(vma, addr - len, len)) {
4287 /* remember the address as a hint for next time */
4288 return (mm->free_area_cache = addr-len);
4289 }
4290 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4291 if (unlikely(mm->mmap_base < len))
4292 goto bottomup;
4293
4294 - addr = mm->mmap_base-len;
4295 - if (do_color_align)
4296 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4297 + addr = mm->mmap_base - len;
4298
4299 do {
4300 + if (do_color_align)
4301 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4302 /*
4303 * Lookup failure means no vma is above this address,
4304 * else if new region fits below vma->vm_start,
4305 * return with success:
4306 */
4307 vma = find_vma(mm, addr);
4308 - if (likely(!vma || addr+len <= vma->vm_start)) {
4309 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4310 /* remember the address as a hint for next time */
4311 return (mm->free_area_cache = addr);
4312 }
4313 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4314 mm->cached_hole_size = vma->vm_start - addr;
4315
4316 /* try just below the current vma->vm_start */
4317 - addr = vma->vm_start-len;
4318 - if (do_color_align)
4319 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4320 - } while (likely(len < vma->vm_start));
4321 + addr = skip_heap_stack_gap(vma, len);
4322 + } while (!IS_ERR_VALUE(addr));
4323
4324 bottomup:
4325 /*
4326 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4327 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4328 sysctl_legacy_va_layout) {
4329 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4330 +
4331 +#ifdef CONFIG_PAX_RANDMMAP
4332 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4333 + mm->mmap_base += mm->delta_mmap;
4334 +#endif
4335 +
4336 mm->get_unmapped_area = arch_get_unmapped_area;
4337 mm->unmap_area = arch_unmap_area;
4338 } else {
4339 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4340 gap = (task_size / 6 * 5);
4341
4342 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4343 +
4344 +#ifdef CONFIG_PAX_RANDMMAP
4345 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4346 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4347 +#endif
4348 +
4349 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4350 mm->unmap_area = arch_unmap_area_topdown;
4351 }
4352 diff -urNp linux-2.6.32.42/arch/sparc/kernel/traps_32.c linux-2.6.32.42/arch/sparc/kernel/traps_32.c
4353 --- linux-2.6.32.42/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4354 +++ linux-2.6.32.42/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4355 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4356 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4357 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4358
4359 +extern void gr_handle_kernel_exploit(void);
4360 +
4361 void die_if_kernel(char *str, struct pt_regs *regs)
4362 {
4363 static int die_counter;
4364 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4365 count++ < 30 &&
4366 (((unsigned long) rw) >= PAGE_OFFSET) &&
4367 !(((unsigned long) rw) & 0x7)) {
4368 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4369 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4370 (void *) rw->ins[7]);
4371 rw = (struct reg_window32 *)rw->ins[6];
4372 }
4373 }
4374 printk("Instruction DUMP:");
4375 instruction_dump ((unsigned long *) regs->pc);
4376 - if(regs->psr & PSR_PS)
4377 + if(regs->psr & PSR_PS) {
4378 + gr_handle_kernel_exploit();
4379 do_exit(SIGKILL);
4380 + }
4381 do_exit(SIGSEGV);
4382 }
4383
4384 diff -urNp linux-2.6.32.42/arch/sparc/kernel/traps_64.c linux-2.6.32.42/arch/sparc/kernel/traps_64.c
4385 --- linux-2.6.32.42/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4386 +++ linux-2.6.32.42/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4387 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4388 i + 1,
4389 p->trapstack[i].tstate, p->trapstack[i].tpc,
4390 p->trapstack[i].tnpc, p->trapstack[i].tt);
4391 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4392 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4393 }
4394 }
4395
4396 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4397
4398 lvl -= 0x100;
4399 if (regs->tstate & TSTATE_PRIV) {
4400 +
4401 +#ifdef CONFIG_PAX_REFCOUNT
4402 + if (lvl == 6)
4403 + pax_report_refcount_overflow(regs);
4404 +#endif
4405 +
4406 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4407 die_if_kernel(buffer, regs);
4408 }
4409 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4410 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4411 {
4412 char buffer[32];
4413 -
4414 +
4415 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4416 0, lvl, SIGTRAP) == NOTIFY_STOP)
4417 return;
4418
4419 +#ifdef CONFIG_PAX_REFCOUNT
4420 + if (lvl == 6)
4421 + pax_report_refcount_overflow(regs);
4422 +#endif
4423 +
4424 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4425
4426 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4427 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4428 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4429 printk("%s" "ERROR(%d): ",
4430 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4431 - printk("TPC<%pS>\n", (void *) regs->tpc);
4432 + printk("TPC<%pA>\n", (void *) regs->tpc);
4433 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4434 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4435 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4436 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4437 smp_processor_id(),
4438 (type & 0x1) ? 'I' : 'D',
4439 regs->tpc);
4440 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4441 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4442 panic("Irrecoverable Cheetah+ parity error.");
4443 }
4444
4445 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4446 smp_processor_id(),
4447 (type & 0x1) ? 'I' : 'D',
4448 regs->tpc);
4449 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4450 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4451 }
4452
4453 struct sun4v_error_entry {
4454 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4455
4456 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4457 regs->tpc, tl);
4458 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4459 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4460 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4461 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4462 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4463 (void *) regs->u_regs[UREG_I7]);
4464 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4465 "pte[%lx] error[%lx]\n",
4466 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4467
4468 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4469 regs->tpc, tl);
4470 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4471 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4472 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4473 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4474 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4475 (void *) regs->u_regs[UREG_I7]);
4476 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4477 "pte[%lx] error[%lx]\n",
4478 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4479 fp = (unsigned long)sf->fp + STACK_BIAS;
4480 }
4481
4482 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4483 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4484 } while (++count < 16);
4485 }
4486
4487 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4488 return (struct reg_window *) (fp + STACK_BIAS);
4489 }
4490
4491 +extern void gr_handle_kernel_exploit(void);
4492 +
4493 void die_if_kernel(char *str, struct pt_regs *regs)
4494 {
4495 static int die_counter;
4496 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4497 while (rw &&
4498 count++ < 30&&
4499 is_kernel_stack(current, rw)) {
4500 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4501 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4502 (void *) rw->ins[7]);
4503
4504 rw = kernel_stack_up(rw);
4505 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4506 }
4507 user_instruction_dump ((unsigned int __user *) regs->tpc);
4508 }
4509 - if (regs->tstate & TSTATE_PRIV)
4510 + if (regs->tstate & TSTATE_PRIV) {
4511 + gr_handle_kernel_exploit();
4512 do_exit(SIGKILL);
4513 + }
4514 +
4515 do_exit(SIGSEGV);
4516 }
4517 EXPORT_SYMBOL(die_if_kernel);
4518 diff -urNp linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c
4519 --- linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4520 +++ linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4521 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4522 if (count < 5) {
4523 last_time = jiffies;
4524 count++;
4525 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4526 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4527 regs->tpc, (void *) regs->tpc);
4528 }
4529 }
4530 diff -urNp linux-2.6.32.42/arch/sparc/lib/atomic_64.S linux-2.6.32.42/arch/sparc/lib/atomic_64.S
4531 --- linux-2.6.32.42/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4532 +++ linux-2.6.32.42/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4533 @@ -18,7 +18,12 @@
4534 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4535 BACKOFF_SETUP(%o2)
4536 1: lduw [%o1], %g1
4537 - add %g1, %o0, %g7
4538 + addcc %g1, %o0, %g7
4539 +
4540 +#ifdef CONFIG_PAX_REFCOUNT
4541 + tvs %icc, 6
4542 +#endif
4543 +
4544 cas [%o1], %g1, %g7
4545 cmp %g1, %g7
4546 bne,pn %icc, 2f
4547 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4548 2: BACKOFF_SPIN(%o2, %o3, 1b)
4549 .size atomic_add, .-atomic_add
4550
4551 + .globl atomic_add_unchecked
4552 + .type atomic_add_unchecked,#function
4553 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4554 + BACKOFF_SETUP(%o2)
4555 +1: lduw [%o1], %g1
4556 + add %g1, %o0, %g7
4557 + cas [%o1], %g1, %g7
4558 + cmp %g1, %g7
4559 + bne,pn %icc, 2f
4560 + nop
4561 + retl
4562 + nop
4563 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4564 + .size atomic_add_unchecked, .-atomic_add_unchecked
4565 +
4566 .globl atomic_sub
4567 .type atomic_sub,#function
4568 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4569 BACKOFF_SETUP(%o2)
4570 1: lduw [%o1], %g1
4571 - sub %g1, %o0, %g7
4572 + subcc %g1, %o0, %g7
4573 +
4574 +#ifdef CONFIG_PAX_REFCOUNT
4575 + tvs %icc, 6
4576 +#endif
4577 +
4578 cas [%o1], %g1, %g7
4579 cmp %g1, %g7
4580 bne,pn %icc, 2f
4581 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4582 2: BACKOFF_SPIN(%o2, %o3, 1b)
4583 .size atomic_sub, .-atomic_sub
4584
4585 + .globl atomic_sub_unchecked
4586 + .type atomic_sub_unchecked,#function
4587 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4588 + BACKOFF_SETUP(%o2)
4589 +1: lduw [%o1], %g1
4590 + sub %g1, %o0, %g7
4591 + cas [%o1], %g1, %g7
4592 + cmp %g1, %g7
4593 + bne,pn %icc, 2f
4594 + nop
4595 + retl
4596 + nop
4597 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4598 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4599 +
4600 .globl atomic_add_ret
4601 .type atomic_add_ret,#function
4602 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4603 BACKOFF_SETUP(%o2)
4604 1: lduw [%o1], %g1
4605 - add %g1, %o0, %g7
4606 + addcc %g1, %o0, %g7
4607 +
4608 +#ifdef CONFIG_PAX_REFCOUNT
4609 + tvs %icc, 6
4610 +#endif
4611 +
4612 cas [%o1], %g1, %g7
4613 cmp %g1, %g7
4614 bne,pn %icc, 2f
4615 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4616 2: BACKOFF_SPIN(%o2, %o3, 1b)
4617 .size atomic_add_ret, .-atomic_add_ret
4618
4619 + .globl atomic_add_ret_unchecked
4620 + .type atomic_add_ret_unchecked,#function
4621 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4622 + BACKOFF_SETUP(%o2)
4623 +1: lduw [%o1], %g1
4624 + addcc %g1, %o0, %g7
4625 + cas [%o1], %g1, %g7
4626 + cmp %g1, %g7
4627 + bne,pn %icc, 2f
4628 + add %g7, %o0, %g7
4629 + sra %g7, 0, %o0
4630 + retl
4631 + nop
4632 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4633 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4634 +
4635 .globl atomic_sub_ret
4636 .type atomic_sub_ret,#function
4637 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4638 BACKOFF_SETUP(%o2)
4639 1: lduw [%o1], %g1
4640 - sub %g1, %o0, %g7
4641 + subcc %g1, %o0, %g7
4642 +
4643 +#ifdef CONFIG_PAX_REFCOUNT
4644 + tvs %icc, 6
4645 +#endif
4646 +
4647 cas [%o1], %g1, %g7
4648 cmp %g1, %g7
4649 bne,pn %icc, 2f
4650 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4651 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4652 BACKOFF_SETUP(%o2)
4653 1: ldx [%o1], %g1
4654 - add %g1, %o0, %g7
4655 + addcc %g1, %o0, %g7
4656 +
4657 +#ifdef CONFIG_PAX_REFCOUNT
4658 + tvs %xcc, 6
4659 +#endif
4660 +
4661 casx [%o1], %g1, %g7
4662 cmp %g1, %g7
4663 bne,pn %xcc, 2f
4664 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4665 2: BACKOFF_SPIN(%o2, %o3, 1b)
4666 .size atomic64_add, .-atomic64_add
4667
4668 + .globl atomic64_add_unchecked
4669 + .type atomic64_add_unchecked,#function
4670 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4671 + BACKOFF_SETUP(%o2)
4672 +1: ldx [%o1], %g1
4673 + addcc %g1, %o0, %g7
4674 + casx [%o1], %g1, %g7
4675 + cmp %g1, %g7
4676 + bne,pn %xcc, 2f
4677 + nop
4678 + retl
4679 + nop
4680 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4681 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4682 +
4683 .globl atomic64_sub
4684 .type atomic64_sub,#function
4685 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4686 BACKOFF_SETUP(%o2)
4687 1: ldx [%o1], %g1
4688 - sub %g1, %o0, %g7
4689 + subcc %g1, %o0, %g7
4690 +
4691 +#ifdef CONFIG_PAX_REFCOUNT
4692 + tvs %xcc, 6
4693 +#endif
4694 +
4695 casx [%o1], %g1, %g7
4696 cmp %g1, %g7
4697 bne,pn %xcc, 2f
4698 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4699 2: BACKOFF_SPIN(%o2, %o3, 1b)
4700 .size atomic64_sub, .-atomic64_sub
4701
4702 + .globl atomic64_sub_unchecked
4703 + .type atomic64_sub_unchecked,#function
4704 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4705 + BACKOFF_SETUP(%o2)
4706 +1: ldx [%o1], %g1
4707 + subcc %g1, %o0, %g7
4708 + casx [%o1], %g1, %g7
4709 + cmp %g1, %g7
4710 + bne,pn %xcc, 2f
4711 + nop
4712 + retl
4713 + nop
4714 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4715 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4716 +
4717 .globl atomic64_add_ret
4718 .type atomic64_add_ret,#function
4719 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4720 BACKOFF_SETUP(%o2)
4721 1: ldx [%o1], %g1
4722 - add %g1, %o0, %g7
4723 + addcc %g1, %o0, %g7
4724 +
4725 +#ifdef CONFIG_PAX_REFCOUNT
4726 + tvs %xcc, 6
4727 +#endif
4728 +
4729 casx [%o1], %g1, %g7
4730 cmp %g1, %g7
4731 bne,pn %xcc, 2f
4732 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4733 2: BACKOFF_SPIN(%o2, %o3, 1b)
4734 .size atomic64_add_ret, .-atomic64_add_ret
4735
4736 + .globl atomic64_add_ret_unchecked
4737 + .type atomic64_add_ret_unchecked,#function
4738 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4739 + BACKOFF_SETUP(%o2)
4740 +1: ldx [%o1], %g1
4741 + addcc %g1, %o0, %g7
4742 + casx [%o1], %g1, %g7
4743 + cmp %g1, %g7
4744 + bne,pn %xcc, 2f
4745 + add %g7, %o0, %g7
4746 + mov %g7, %o0
4747 + retl
4748 + nop
4749 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4750 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4751 +
4752 .globl atomic64_sub_ret
4753 .type atomic64_sub_ret,#function
4754 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4755 BACKOFF_SETUP(%o2)
4756 1: ldx [%o1], %g1
4757 - sub %g1, %o0, %g7
4758 + subcc %g1, %o0, %g7
4759 +
4760 +#ifdef CONFIG_PAX_REFCOUNT
4761 + tvs %xcc, 6
4762 +#endif
4763 +
4764 casx [%o1], %g1, %g7
4765 cmp %g1, %g7
4766 bne,pn %xcc, 2f
4767 diff -urNp linux-2.6.32.42/arch/sparc/lib/ksyms.c linux-2.6.32.42/arch/sparc/lib/ksyms.c
4768 --- linux-2.6.32.42/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
4769 +++ linux-2.6.32.42/arch/sparc/lib/ksyms.c 2011-04-17 15:56:46.000000000 -0400
4770 @@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
4771
4772 /* Atomic counter implementation. */
4773 EXPORT_SYMBOL(atomic_add);
4774 +EXPORT_SYMBOL(atomic_add_unchecked);
4775 EXPORT_SYMBOL(atomic_add_ret);
4776 EXPORT_SYMBOL(atomic_sub);
4777 +EXPORT_SYMBOL(atomic_sub_unchecked);
4778 EXPORT_SYMBOL(atomic_sub_ret);
4779 EXPORT_SYMBOL(atomic64_add);
4780 +EXPORT_SYMBOL(atomic64_add_unchecked);
4781 EXPORT_SYMBOL(atomic64_add_ret);
4782 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4783 EXPORT_SYMBOL(atomic64_sub);
4784 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4785 EXPORT_SYMBOL(atomic64_sub_ret);
4786
4787 /* Atomic bit operations. */
4788 diff -urNp linux-2.6.32.42/arch/sparc/lib/Makefile linux-2.6.32.42/arch/sparc/lib/Makefile
4789 --- linux-2.6.32.42/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
4790 +++ linux-2.6.32.42/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
4791 @@ -2,7 +2,7 @@
4792 #
4793
4794 asflags-y := -ansi -DST_DIV0=0x02
4795 -ccflags-y := -Werror
4796 +#ccflags-y := -Werror
4797
4798 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4799 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4800 diff -urNp linux-2.6.32.42/arch/sparc/lib/rwsem_64.S linux-2.6.32.42/arch/sparc/lib/rwsem_64.S
4801 --- linux-2.6.32.42/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
4802 +++ linux-2.6.32.42/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
4803 @@ -11,7 +11,12 @@
4804 .globl __down_read
4805 __down_read:
4806 1: lduw [%o0], %g1
4807 - add %g1, 1, %g7
4808 + addcc %g1, 1, %g7
4809 +
4810 +#ifdef CONFIG_PAX_REFCOUNT
4811 + tvs %icc, 6
4812 +#endif
4813 +
4814 cas [%o0], %g1, %g7
4815 cmp %g1, %g7
4816 bne,pn %icc, 1b
4817 @@ -33,7 +38,12 @@ __down_read:
4818 .globl __down_read_trylock
4819 __down_read_trylock:
4820 1: lduw [%o0], %g1
4821 - add %g1, 1, %g7
4822 + addcc %g1, 1, %g7
4823 +
4824 +#ifdef CONFIG_PAX_REFCOUNT
4825 + tvs %icc, 6
4826 +#endif
4827 +
4828 cmp %g7, 0
4829 bl,pn %icc, 2f
4830 mov 0, %o1
4831 @@ -51,7 +61,12 @@ __down_write:
4832 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4833 1:
4834 lduw [%o0], %g3
4835 - add %g3, %g1, %g7
4836 + addcc %g3, %g1, %g7
4837 +
4838 +#ifdef CONFIG_PAX_REFCOUNT
4839 + tvs %icc, 6
4840 +#endif
4841 +
4842 cas [%o0], %g3, %g7
4843 cmp %g3, %g7
4844 bne,pn %icc, 1b
4845 @@ -77,7 +92,12 @@ __down_write_trylock:
4846 cmp %g3, 0
4847 bne,pn %icc, 2f
4848 mov 0, %o1
4849 - add %g3, %g1, %g7
4850 + addcc %g3, %g1, %g7
4851 +
4852 +#ifdef CONFIG_PAX_REFCOUNT
4853 + tvs %icc, 6
4854 +#endif
4855 +
4856 cas [%o0], %g3, %g7
4857 cmp %g3, %g7
4858 bne,pn %icc, 1b
4859 @@ -90,7 +110,12 @@ __down_write_trylock:
4860 __up_read:
4861 1:
4862 lduw [%o0], %g1
4863 - sub %g1, 1, %g7
4864 + subcc %g1, 1, %g7
4865 +
4866 +#ifdef CONFIG_PAX_REFCOUNT
4867 + tvs %icc, 6
4868 +#endif
4869 +
4870 cas [%o0], %g1, %g7
4871 cmp %g1, %g7
4872 bne,pn %icc, 1b
4873 @@ -118,7 +143,12 @@ __up_write:
4874 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4875 1:
4876 lduw [%o0], %g3
4877 - sub %g3, %g1, %g7
4878 + subcc %g3, %g1, %g7
4879 +
4880 +#ifdef CONFIG_PAX_REFCOUNT
4881 + tvs %icc, 6
4882 +#endif
4883 +
4884 cas [%o0], %g3, %g7
4885 cmp %g3, %g7
4886 bne,pn %icc, 1b
4887 @@ -143,7 +173,12 @@ __downgrade_write:
4888 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
4889 1:
4890 lduw [%o0], %g3
4891 - sub %g3, %g1, %g7
4892 + subcc %g3, %g1, %g7
4893 +
4894 +#ifdef CONFIG_PAX_REFCOUNT
4895 + tvs %icc, 6
4896 +#endif
4897 +
4898 cas [%o0], %g3, %g7
4899 cmp %g3, %g7
4900 bne,pn %icc, 1b
4901 diff -urNp linux-2.6.32.42/arch/sparc/Makefile linux-2.6.32.42/arch/sparc/Makefile
4902 --- linux-2.6.32.42/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
4903 +++ linux-2.6.32.42/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
4904 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4905 # Export what is needed by arch/sparc/boot/Makefile
4906 export VMLINUX_INIT VMLINUX_MAIN
4907 VMLINUX_INIT := $(head-y) $(init-y)
4908 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4909 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4910 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4911 VMLINUX_MAIN += $(drivers-y) $(net-y)
4912
4913 diff -urNp linux-2.6.32.42/arch/sparc/mm/fault_32.c linux-2.6.32.42/arch/sparc/mm/fault_32.c
4914 --- linux-2.6.32.42/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
4915 +++ linux-2.6.32.42/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
4916 @@ -21,6 +21,9 @@
4917 #include <linux/interrupt.h>
4918 #include <linux/module.h>
4919 #include <linux/kdebug.h>
4920 +#include <linux/slab.h>
4921 +#include <linux/pagemap.h>
4922 +#include <linux/compiler.h>
4923
4924 #include <asm/system.h>
4925 #include <asm/page.h>
4926 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
4927 return safe_compute_effective_address(regs, insn);
4928 }
4929
4930 +#ifdef CONFIG_PAX_PAGEEXEC
4931 +#ifdef CONFIG_PAX_DLRESOLVE
4932 +static void pax_emuplt_close(struct vm_area_struct *vma)
4933 +{
4934 + vma->vm_mm->call_dl_resolve = 0UL;
4935 +}
4936 +
4937 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4938 +{
4939 + unsigned int *kaddr;
4940 +
4941 + vmf->page = alloc_page(GFP_HIGHUSER);
4942 + if (!vmf->page)
4943 + return VM_FAULT_OOM;
4944 +
4945 + kaddr = kmap(vmf->page);
4946 + memset(kaddr, 0, PAGE_SIZE);
4947 + kaddr[0] = 0x9DE3BFA8U; /* save */
4948 + flush_dcache_page(vmf->page);
4949 + kunmap(vmf->page);
4950 + return VM_FAULT_MAJOR;
4951 +}
4952 +
4953 +static const struct vm_operations_struct pax_vm_ops = {
4954 + .close = pax_emuplt_close,
4955 + .fault = pax_emuplt_fault
4956 +};
4957 +
4958 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4959 +{
4960 + int ret;
4961 +
4962 + vma->vm_mm = current->mm;
4963 + vma->vm_start = addr;
4964 + vma->vm_end = addr + PAGE_SIZE;
4965 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4966 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4967 + vma->vm_ops = &pax_vm_ops;
4968 +
4969 + ret = insert_vm_struct(current->mm, vma);
4970 + if (ret)
4971 + return ret;
4972 +
4973 + ++current->mm->total_vm;
4974 + return 0;
4975 +}
4976 +#endif
4977 +
4978 +/*
4979 + * PaX: decide what to do with offenders (regs->pc = fault address)
4980 + *
4981 + * returns 1 when task should be killed
4982 + * 2 when patched PLT trampoline was detected
4983 + * 3 when unpatched PLT trampoline was detected
4984 + */
4985 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4986 +{
4987 +
4988 +#ifdef CONFIG_PAX_EMUPLT
4989 + int err;
4990 +
4991 + do { /* PaX: patched PLT emulation #1 */
4992 + unsigned int sethi1, sethi2, jmpl;
4993 +
4994 + err = get_user(sethi1, (unsigned int *)regs->pc);
4995 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4996 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4997 +
4998 + if (err)
4999 + break;
5000 +
5001 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5002 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5003 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5004 + {
5005 + unsigned int addr;
5006 +
5007 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5008 + addr = regs->u_regs[UREG_G1];
5009 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5010 + regs->pc = addr;
5011 + regs->npc = addr+4;
5012 + return 2;
5013 + }
5014 + } while (0);
5015 +
5016 + { /* PaX: patched PLT emulation #2 */
5017 + unsigned int ba;
5018 +
5019 + err = get_user(ba, (unsigned int *)regs->pc);
5020 +
5021 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5022 + unsigned int addr;
5023 +
5024 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5025 + regs->pc = addr;
5026 + regs->npc = addr+4;
5027 + return 2;
5028 + }
5029 + }
5030 +
5031 + do { /* PaX: patched PLT emulation #3 */
5032 + unsigned int sethi, jmpl, nop;
5033 +
5034 + err = get_user(sethi, (unsigned int *)regs->pc);
5035 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5036 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5037 +
5038 + if (err)
5039 + break;
5040 +
5041 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5042 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5043 + nop == 0x01000000U)
5044 + {
5045 + unsigned int addr;
5046 +
5047 + addr = (sethi & 0x003FFFFFU) << 10;
5048 + regs->u_regs[UREG_G1] = addr;
5049 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5050 + regs->pc = addr;
5051 + regs->npc = addr+4;
5052 + return 2;
5053 + }
5054 + } while (0);
5055 +
5056 + do { /* PaX: unpatched PLT emulation step 1 */
5057 + unsigned int sethi, ba, nop;
5058 +
5059 + err = get_user(sethi, (unsigned int *)regs->pc);
5060 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
5061 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5062 +
5063 + if (err)
5064 + break;
5065 +
5066 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5067 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5068 + nop == 0x01000000U)
5069 + {
5070 + unsigned int addr, save, call;
5071 +
5072 + if ((ba & 0xFFC00000U) == 0x30800000U)
5073 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5074 + else
5075 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5076 +
5077 + err = get_user(save, (unsigned int *)addr);
5078 + err |= get_user(call, (unsigned int *)(addr+4));
5079 + err |= get_user(nop, (unsigned int *)(addr+8));
5080 + if (err)
5081 + break;
5082 +
5083 +#ifdef CONFIG_PAX_DLRESOLVE
5084 + if (save == 0x9DE3BFA8U &&
5085 + (call & 0xC0000000U) == 0x40000000U &&
5086 + nop == 0x01000000U)
5087 + {
5088 + struct vm_area_struct *vma;
5089 + unsigned long call_dl_resolve;
5090 +
5091 + down_read(&current->mm->mmap_sem);
5092 + call_dl_resolve = current->mm->call_dl_resolve;
5093 + up_read(&current->mm->mmap_sem);
5094 + if (likely(call_dl_resolve))
5095 + goto emulate;
5096 +
5097 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5098 +
5099 + down_write(&current->mm->mmap_sem);
5100 + if (current->mm->call_dl_resolve) {
5101 + call_dl_resolve = current->mm->call_dl_resolve;
5102 + up_write(&current->mm->mmap_sem);
5103 + if (vma)
5104 + kmem_cache_free(vm_area_cachep, vma);
5105 + goto emulate;
5106 + }
5107 +
5108 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5109 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5110 + up_write(&current->mm->mmap_sem);
5111 + if (vma)
5112 + kmem_cache_free(vm_area_cachep, vma);
5113 + return 1;
5114 + }
5115 +
5116 + if (pax_insert_vma(vma, call_dl_resolve)) {
5117 + up_write(&current->mm->mmap_sem);
5118 + kmem_cache_free(vm_area_cachep, vma);
5119 + return 1;
5120 + }
5121 +
5122 + current->mm->call_dl_resolve = call_dl_resolve;
5123 + up_write(&current->mm->mmap_sem);
5124 +
5125 +emulate:
5126 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5127 + regs->pc = call_dl_resolve;
5128 + regs->npc = addr+4;
5129 + return 3;
5130 + }
5131 +#endif
5132 +
5133 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5134 + if ((save & 0xFFC00000U) == 0x05000000U &&
5135 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5136 + nop == 0x01000000U)
5137 + {
5138 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5139 + regs->u_regs[UREG_G2] = addr + 4;
5140 + addr = (save & 0x003FFFFFU) << 10;
5141 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5142 + regs->pc = addr;
5143 + regs->npc = addr+4;
5144 + return 3;
5145 + }
5146 + }
5147 + } while (0);
5148 +
5149 + do { /* PaX: unpatched PLT emulation step 2 */
5150 + unsigned int save, call, nop;
5151 +
5152 + err = get_user(save, (unsigned int *)(regs->pc-4));
5153 + err |= get_user(call, (unsigned int *)regs->pc);
5154 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5155 + if (err)
5156 + break;
5157 +
5158 + if (save == 0x9DE3BFA8U &&
5159 + (call & 0xC0000000U) == 0x40000000U &&
5160 + nop == 0x01000000U)
5161 + {
5162 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5163 +
5164 + regs->u_regs[UREG_RETPC] = regs->pc;
5165 + regs->pc = dl_resolve;
5166 + regs->npc = dl_resolve+4;
5167 + return 3;
5168 + }
5169 + } while (0);
5170 +#endif
5171 +
5172 + return 1;
5173 +}
5174 +
5175 +void pax_report_insns(void *pc, void *sp)
5176 +{
5177 + unsigned long i;
5178 +
5179 + printk(KERN_ERR "PAX: bytes at PC: ");
5180 + for (i = 0; i < 8; i++) {
5181 + unsigned int c;
5182 + if (get_user(c, (unsigned int *)pc+i))
5183 + printk(KERN_CONT "???????? ");
5184 + else
5185 + printk(KERN_CONT "%08x ", c);
5186 + }
5187 + printk("\n");
5188 +}
5189 +#endif
5190 +
5191 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5192 unsigned long address)
5193 {
5194 @@ -231,6 +495,24 @@ good_area:
5195 if(!(vma->vm_flags & VM_WRITE))
5196 goto bad_area;
5197 } else {
5198 +
5199 +#ifdef CONFIG_PAX_PAGEEXEC
5200 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5201 + up_read(&mm->mmap_sem);
5202 + switch (pax_handle_fetch_fault(regs)) {
5203 +
5204 +#ifdef CONFIG_PAX_EMUPLT
5205 + case 2:
5206 + case 3:
5207 + return;
5208 +#endif
5209 +
5210 + }
5211 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5212 + do_group_exit(SIGKILL);
5213 + }
5214 +#endif
5215 +
5216 /* Allow reads even for write-only mappings */
5217 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5218 goto bad_area;
5219 diff -urNp linux-2.6.32.42/arch/sparc/mm/fault_64.c linux-2.6.32.42/arch/sparc/mm/fault_64.c
5220 --- linux-2.6.32.42/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5221 +++ linux-2.6.32.42/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5222 @@ -20,6 +20,9 @@
5223 #include <linux/kprobes.h>
5224 #include <linux/kdebug.h>
5225 #include <linux/percpu.h>
5226 +#include <linux/slab.h>
5227 +#include <linux/pagemap.h>
5228 +#include <linux/compiler.h>
5229
5230 #include <asm/page.h>
5231 #include <asm/pgtable.h>
5232 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5233 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5234 regs->tpc);
5235 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5236 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5237 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5238 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5239 dump_stack();
5240 unhandled_fault(regs->tpc, current, regs);
5241 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5242 show_regs(regs);
5243 }
5244
5245 +#ifdef CONFIG_PAX_PAGEEXEC
5246 +#ifdef CONFIG_PAX_DLRESOLVE
5247 +static void pax_emuplt_close(struct vm_area_struct *vma)
5248 +{
5249 + vma->vm_mm->call_dl_resolve = 0UL;
5250 +}
5251 +
5252 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5253 +{
5254 + unsigned int *kaddr;
5255 +
5256 + vmf->page = alloc_page(GFP_HIGHUSER);
5257 + if (!vmf->page)
5258 + return VM_FAULT_OOM;
5259 +
5260 + kaddr = kmap(vmf->page);
5261 + memset(kaddr, 0, PAGE_SIZE);
5262 + kaddr[0] = 0x9DE3BFA8U; /* save */
5263 + flush_dcache_page(vmf->page);
5264 + kunmap(vmf->page);
5265 + return VM_FAULT_MAJOR;
5266 +}
5267 +
5268 +static const struct vm_operations_struct pax_vm_ops = {
5269 + .close = pax_emuplt_close,
5270 + .fault = pax_emuplt_fault
5271 +};
5272 +
5273 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5274 +{
5275 + int ret;
5276 +
5277 + vma->vm_mm = current->mm;
5278 + vma->vm_start = addr;
5279 + vma->vm_end = addr + PAGE_SIZE;
5280 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5281 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5282 + vma->vm_ops = &pax_vm_ops;
5283 +
5284 + ret = insert_vm_struct(current->mm, vma);
5285 + if (ret)
5286 + return ret;
5287 +
5288 + ++current->mm->total_vm;
5289 + return 0;
5290 +}
5291 +#endif
5292 +
5293 +/*
5294 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5295 + *
5296 + * returns 1 when task should be killed
5297 + * 2 when patched PLT trampoline was detected
5298 + * 3 when unpatched PLT trampoline was detected
5299 + */
5300 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5301 +{
5302 +
5303 +#ifdef CONFIG_PAX_EMUPLT
5304 + int err;
5305 +
5306 + do { /* PaX: patched PLT emulation #1 */
5307 + unsigned int sethi1, sethi2, jmpl;
5308 +
5309 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5310 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5311 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5312 +
5313 + if (err)
5314 + break;
5315 +
5316 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5317 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5318 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5319 + {
5320 + unsigned long addr;
5321 +
5322 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5323 + addr = regs->u_regs[UREG_G1];
5324 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5325 +
5326 + if (test_thread_flag(TIF_32BIT))
5327 + addr &= 0xFFFFFFFFUL;
5328 +
5329 + regs->tpc = addr;
5330 + regs->tnpc = addr+4;
5331 + return 2;
5332 + }
5333 + } while (0);
5334 +
5335 + { /* PaX: patched PLT emulation #2 */
5336 + unsigned int ba;
5337 +
5338 + err = get_user(ba, (unsigned int *)regs->tpc);
5339 +
5340 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5341 + unsigned long addr;
5342 +
5343 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5344 +
5345 + if (test_thread_flag(TIF_32BIT))
5346 + addr &= 0xFFFFFFFFUL;
5347 +
5348 + regs->tpc = addr;
5349 + regs->tnpc = addr+4;
5350 + return 2;
5351 + }
5352 + }
5353 +
5354 + do { /* PaX: patched PLT emulation #3 */
5355 + unsigned int sethi, jmpl, nop;
5356 +
5357 + err = get_user(sethi, (unsigned int *)regs->tpc);
5358 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5359 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5360 +
5361 + if (err)
5362 + break;
5363 +
5364 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5365 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5366 + nop == 0x01000000U)
5367 + {
5368 + unsigned long addr;
5369 +
5370 + addr = (sethi & 0x003FFFFFU) << 10;
5371 + regs->u_regs[UREG_G1] = addr;
5372 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5373 +
5374 + if (test_thread_flag(TIF_32BIT))
5375 + addr &= 0xFFFFFFFFUL;
5376 +
5377 + regs->tpc = addr;
5378 + regs->tnpc = addr+4;
5379 + return 2;
5380 + }
5381 + } while (0);
5382 +
5383 + do { /* PaX: patched PLT emulation #4 */
5384 + unsigned int sethi, mov1, call, mov2;
5385 +
5386 + err = get_user(sethi, (unsigned int *)regs->tpc);
5387 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5388 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5389 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5390 +
5391 + if (err)
5392 + break;
5393 +
5394 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5395 + mov1 == 0x8210000FU &&
5396 + (call & 0xC0000000U) == 0x40000000U &&
5397 + mov2 == 0x9E100001U)
5398 + {
5399 + unsigned long addr;
5400 +
5401 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5402 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5403 +
5404 + if (test_thread_flag(TIF_32BIT))
5405 + addr &= 0xFFFFFFFFUL;
5406 +
5407 + regs->tpc = addr;
5408 + regs->tnpc = addr+4;
5409 + return 2;
5410 + }
5411 + } while (0);
5412 +
5413 + do { /* PaX: patched PLT emulation #5 */
5414 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5415 +
5416 + err = get_user(sethi, (unsigned int *)regs->tpc);
5417 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5418 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5419 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5420 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5421 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5422 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5423 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5424 +
5425 + if (err)
5426 + break;
5427 +
5428 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5429 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5430 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5431 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5432 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5433 + sllx == 0x83287020U &&
5434 + jmpl == 0x81C04005U &&
5435 + nop == 0x01000000U)
5436 + {
5437 + unsigned long addr;
5438 +
5439 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5440 + regs->u_regs[UREG_G1] <<= 32;
5441 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5442 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5443 + regs->tpc = addr;
5444 + regs->tnpc = addr+4;
5445 + return 2;
5446 + }
5447 + } while (0);
5448 +
5449 + do { /* PaX: patched PLT emulation #6 */
5450 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5451 +
5452 + err = get_user(sethi, (unsigned int *)regs->tpc);
5453 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5454 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5455 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5456 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5457 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5458 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5459 +
5460 + if (err)
5461 + break;
5462 +
5463 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5464 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5465 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5466 + sllx == 0x83287020U &&
5467 + (or & 0xFFFFE000U) == 0x8A116000U &&
5468 + jmpl == 0x81C04005U &&
5469 + nop == 0x01000000U)
5470 + {
5471 + unsigned long addr;
5472 +
5473 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5474 + regs->u_regs[UREG_G1] <<= 32;
5475 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5476 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5477 + regs->tpc = addr;
5478 + regs->tnpc = addr+4;
5479 + return 2;
5480 + }
5481 + } while (0);
5482 +
5483 + do { /* PaX: unpatched PLT emulation step 1 */
5484 + unsigned int sethi, ba, nop;
5485 +
5486 + err = get_user(sethi, (unsigned int *)regs->tpc);
5487 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5488 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5489 +
5490 + if (err)
5491 + break;
5492 +
5493 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5494 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5495 + nop == 0x01000000U)
5496 + {
5497 + unsigned long addr;
5498 + unsigned int save, call;
5499 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5500 +
5501 + if ((ba & 0xFFC00000U) == 0x30800000U)
5502 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5503 + else
5504 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5505 +
5506 + if (test_thread_flag(TIF_32BIT))
5507 + addr &= 0xFFFFFFFFUL;
5508 +
5509 + err = get_user(save, (unsigned int *)addr);
5510 + err |= get_user(call, (unsigned int *)(addr+4));
5511 + err |= get_user(nop, (unsigned int *)(addr+8));
5512 + if (err)
5513 + break;
5514 +
5515 +#ifdef CONFIG_PAX_DLRESOLVE
5516 + if (save == 0x9DE3BFA8U &&
5517 + (call & 0xC0000000U) == 0x40000000U &&
5518 + nop == 0x01000000U)
5519 + {
5520 + struct vm_area_struct *vma;
5521 + unsigned long call_dl_resolve;
5522 +
5523 + down_read(&current->mm->mmap_sem);
5524 + call_dl_resolve = current->mm->call_dl_resolve;
5525 + up_read(&current->mm->mmap_sem);
5526 + if (likely(call_dl_resolve))
5527 + goto emulate;
5528 +
5529 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5530 +
5531 + down_write(&current->mm->mmap_sem);
5532 + if (current->mm->call_dl_resolve) {
5533 + call_dl_resolve = current->mm->call_dl_resolve;
5534 + up_write(&current->mm->mmap_sem);
5535 + if (vma)
5536 + kmem_cache_free(vm_area_cachep, vma);
5537 + goto emulate;
5538 + }
5539 +
5540 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5541 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5542 + up_write(&current->mm->mmap_sem);
5543 + if (vma)
5544 + kmem_cache_free(vm_area_cachep, vma);
5545 + return 1;
5546 + }
5547 +
5548 + if (pax_insert_vma(vma, call_dl_resolve)) {
5549 + up_write(&current->mm->mmap_sem);
5550 + kmem_cache_free(vm_area_cachep, vma);
5551 + return 1;
5552 + }
5553 +
5554 + current->mm->call_dl_resolve = call_dl_resolve;
5555 + up_write(&current->mm->mmap_sem);
5556 +
5557 +emulate:
5558 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5559 + regs->tpc = call_dl_resolve;
5560 + regs->tnpc = addr+4;
5561 + return 3;
5562 + }
5563 +#endif
5564 +
5565 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5566 + if ((save & 0xFFC00000U) == 0x05000000U &&
5567 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5568 + nop == 0x01000000U)
5569 + {
5570 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5571 + regs->u_regs[UREG_G2] = addr + 4;
5572 + addr = (save & 0x003FFFFFU) << 10;
5573 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5574 +
5575 + if (test_thread_flag(TIF_32BIT))
5576 + addr &= 0xFFFFFFFFUL;
5577 +
5578 + regs->tpc = addr;
5579 + regs->tnpc = addr+4;
5580 + return 3;
5581 + }
5582 +
5583 + /* PaX: 64-bit PLT stub */
5584 + err = get_user(sethi1, (unsigned int *)addr);
5585 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5586 + err |= get_user(or1, (unsigned int *)(addr+8));
5587 + err |= get_user(or2, (unsigned int *)(addr+12));
5588 + err |= get_user(sllx, (unsigned int *)(addr+16));
5589 + err |= get_user(add, (unsigned int *)(addr+20));
5590 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5591 + err |= get_user(nop, (unsigned int *)(addr+28));
5592 + if (err)
5593 + break;
5594 +
5595 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5596 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5597 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5598 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5599 + sllx == 0x89293020U &&
5600 + add == 0x8A010005U &&
5601 + jmpl == 0x89C14000U &&
5602 + nop == 0x01000000U)
5603 + {
5604 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5605 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5606 + regs->u_regs[UREG_G4] <<= 32;
5607 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5608 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5609 + regs->u_regs[UREG_G4] = addr + 24;
5610 + addr = regs->u_regs[UREG_G5];
5611 + regs->tpc = addr;
5612 + regs->tnpc = addr+4;
5613 + return 3;
5614 + }
5615 + }
5616 + } while (0);
5617 +
5618 +#ifdef CONFIG_PAX_DLRESOLVE
5619 + do { /* PaX: unpatched PLT emulation step 2 */
5620 + unsigned int save, call, nop;
5621 +
5622 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5623 + err |= get_user(call, (unsigned int *)regs->tpc);
5624 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5625 + if (err)
5626 + break;
5627 +
5628 + if (save == 0x9DE3BFA8U &&
5629 + (call & 0xC0000000U) == 0x40000000U &&
5630 + nop == 0x01000000U)
5631 + {
5632 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5633 +
5634 + if (test_thread_flag(TIF_32BIT))
5635 + dl_resolve &= 0xFFFFFFFFUL;
5636 +
5637 + regs->u_regs[UREG_RETPC] = regs->tpc;
5638 + regs->tpc = dl_resolve;
5639 + regs->tnpc = dl_resolve+4;
5640 + return 3;
5641 + }
5642 + } while (0);
5643 +#endif
5644 +
5645 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5646 + unsigned int sethi, ba, nop;
5647 +
5648 + err = get_user(sethi, (unsigned int *)regs->tpc);
5649 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5650 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5651 +
5652 + if (err)
5653 + break;
5654 +
5655 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5656 + (ba & 0xFFF00000U) == 0x30600000U &&
5657 + nop == 0x01000000U)
5658 + {
5659 + unsigned long addr;
5660 +
5661 + addr = (sethi & 0x003FFFFFU) << 10;
5662 + regs->u_regs[UREG_G1] = addr;
5663 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5664 +
5665 + if (test_thread_flag(TIF_32BIT))
5666 + addr &= 0xFFFFFFFFUL;
5667 +
5668 + regs->tpc = addr;
5669 + regs->tnpc = addr+4;
5670 + return 2;
5671 + }
5672 + } while (0);
5673 +
5674 +#endif
5675 +
5676 + return 1;
5677 +}
5678 +
5679 +void pax_report_insns(void *pc, void *sp)
5680 +{
5681 + unsigned long i;
5682 +
5683 + printk(KERN_ERR "PAX: bytes at PC: ");
5684 + for (i = 0; i < 8; i++) {
5685 + unsigned int c;
5686 + if (get_user(c, (unsigned int *)pc+i))
5687 + printk(KERN_CONT "???????? ");
5688 + else
5689 + printk(KERN_CONT "%08x ", c);
5690 + }
5691 + printk("\n");
5692 +}
5693 +#endif
5694 +
5695 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5696 {
5697 struct mm_struct *mm = current->mm;
5698 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5699 if (!vma)
5700 goto bad_area;
5701
5702 +#ifdef CONFIG_PAX_PAGEEXEC
5703 + /* PaX: detect ITLB misses on non-exec pages */
5704 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5705 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5706 + {
5707 + if (address != regs->tpc)
5708 + goto good_area;
5709 +
5710 + up_read(&mm->mmap_sem);
5711 + switch (pax_handle_fetch_fault(regs)) {
5712 +
5713 +#ifdef CONFIG_PAX_EMUPLT
5714 + case 2:
5715 + case 3:
5716 + return;
5717 +#endif
5718 +
5719 + }
5720 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5721 + do_group_exit(SIGKILL);
5722 + }
5723 +#endif
5724 +
5725 /* Pure DTLB misses do not tell us whether the fault causing
5726 * load/store/atomic was a write or not, it only says that there
5727 * was no match. So in such a case we (carefully) read the
5728 diff -urNp linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c
5729 --- linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
5730 +++ linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
5731 @@ -69,7 +69,7 @@ full_search:
5732 }
5733 return -ENOMEM;
5734 }
5735 - if (likely(!vma || addr + len <= vma->vm_start)) {
5736 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5737 /*
5738 * Remember the place where we stopped the search:
5739 */
5740 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
5741 /* make sure it can fit in the remaining address space */
5742 if (likely(addr > len)) {
5743 vma = find_vma(mm, addr-len);
5744 - if (!vma || addr <= vma->vm_start) {
5745 + if (check_heap_stack_gap(vma, addr - len, len)) {
5746 /* remember the address as a hint for next time */
5747 return (mm->free_area_cache = addr-len);
5748 }
5749 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
5750 if (unlikely(mm->mmap_base < len))
5751 goto bottomup;
5752
5753 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5754 + addr = mm->mmap_base - len;
5755
5756 do {
5757 + addr &= HPAGE_MASK;
5758 /*
5759 * Lookup failure means no vma is above this address,
5760 * else if new region fits below vma->vm_start,
5761 * return with success:
5762 */
5763 vma = find_vma(mm, addr);
5764 - if (likely(!vma || addr+len <= vma->vm_start)) {
5765 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5766 /* remember the address as a hint for next time */
5767 return (mm->free_area_cache = addr);
5768 }
5769 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
5770 mm->cached_hole_size = vma->vm_start - addr;
5771
5772 /* try just below the current vma->vm_start */
5773 - addr = (vma->vm_start-len) & HPAGE_MASK;
5774 - } while (likely(len < vma->vm_start));
5775 + addr = skip_heap_stack_gap(vma, len);
5776 + } while (!IS_ERR_VALUE(addr));
5777
5778 bottomup:
5779 /*
5780 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
5781 if (addr) {
5782 addr = ALIGN(addr, HPAGE_SIZE);
5783 vma = find_vma(mm, addr);
5784 - if (task_size - len >= addr &&
5785 - (!vma || addr + len <= vma->vm_start))
5786 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5787 return addr;
5788 }
5789 if (mm->get_unmapped_area == arch_get_unmapped_area)
5790 diff -urNp linux-2.6.32.42/arch/sparc/mm/init_32.c linux-2.6.32.42/arch/sparc/mm/init_32.c
5791 --- linux-2.6.32.42/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
5792 +++ linux-2.6.32.42/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
5793 @@ -317,6 +317,9 @@ extern void device_scan(void);
5794 pgprot_t PAGE_SHARED __read_mostly;
5795 EXPORT_SYMBOL(PAGE_SHARED);
5796
5797 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5798 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5799 +
5800 void __init paging_init(void)
5801 {
5802 switch(sparc_cpu_model) {
5803 @@ -345,17 +348,17 @@ void __init paging_init(void)
5804
5805 /* Initialize the protection map with non-constant, MMU dependent values. */
5806 protection_map[0] = PAGE_NONE;
5807 - protection_map[1] = PAGE_READONLY;
5808 - protection_map[2] = PAGE_COPY;
5809 - protection_map[3] = PAGE_COPY;
5810 + protection_map[1] = PAGE_READONLY_NOEXEC;
5811 + protection_map[2] = PAGE_COPY_NOEXEC;
5812 + protection_map[3] = PAGE_COPY_NOEXEC;
5813 protection_map[4] = PAGE_READONLY;
5814 protection_map[5] = PAGE_READONLY;
5815 protection_map[6] = PAGE_COPY;
5816 protection_map[7] = PAGE_COPY;
5817 protection_map[8] = PAGE_NONE;
5818 - protection_map[9] = PAGE_READONLY;
5819 - protection_map[10] = PAGE_SHARED;
5820 - protection_map[11] = PAGE_SHARED;
5821 + protection_map[9] = PAGE_READONLY_NOEXEC;
5822 + protection_map[10] = PAGE_SHARED_NOEXEC;
5823 + protection_map[11] = PAGE_SHARED_NOEXEC;
5824 protection_map[12] = PAGE_READONLY;
5825 protection_map[13] = PAGE_READONLY;
5826 protection_map[14] = PAGE_SHARED;
5827 diff -urNp linux-2.6.32.42/arch/sparc/mm/Makefile linux-2.6.32.42/arch/sparc/mm/Makefile
5828 --- linux-2.6.32.42/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
5829 +++ linux-2.6.32.42/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
5830 @@ -2,7 +2,7 @@
5831 #
5832
5833 asflags-y := -ansi
5834 -ccflags-y := -Werror
5835 +#ccflags-y := -Werror
5836
5837 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5838 obj-y += fault_$(BITS).o
5839 diff -urNp linux-2.6.32.42/arch/sparc/mm/srmmu.c linux-2.6.32.42/arch/sparc/mm/srmmu.c
5840 --- linux-2.6.32.42/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
5841 +++ linux-2.6.32.42/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
5842 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5843 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5844 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5845 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5846 +
5847 +#ifdef CONFIG_PAX_PAGEEXEC
5848 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5849 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5850 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5851 +#endif
5852 +
5853 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5854 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5855
5856 diff -urNp linux-2.6.32.42/arch/um/include/asm/kmap_types.h linux-2.6.32.42/arch/um/include/asm/kmap_types.h
5857 --- linux-2.6.32.42/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
5858 +++ linux-2.6.32.42/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
5859 @@ -23,6 +23,7 @@ enum km_type {
5860 KM_IRQ1,
5861 KM_SOFTIRQ0,
5862 KM_SOFTIRQ1,
5863 + KM_CLEARPAGE,
5864 KM_TYPE_NR
5865 };
5866
5867 diff -urNp linux-2.6.32.42/arch/um/include/asm/page.h linux-2.6.32.42/arch/um/include/asm/page.h
5868 --- linux-2.6.32.42/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
5869 +++ linux-2.6.32.42/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
5870 @@ -14,6 +14,9 @@
5871 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5872 #define PAGE_MASK (~(PAGE_SIZE-1))
5873
5874 +#define ktla_ktva(addr) (addr)
5875 +#define ktva_ktla(addr) (addr)
5876 +
5877 #ifndef __ASSEMBLY__
5878
5879 struct page;
5880 diff -urNp linux-2.6.32.42/arch/um/kernel/process.c linux-2.6.32.42/arch/um/kernel/process.c
5881 --- linux-2.6.32.42/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
5882 +++ linux-2.6.32.42/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
5883 @@ -393,22 +393,6 @@ int singlestepping(void * t)
5884 return 2;
5885 }
5886
5887 -/*
5888 - * Only x86 and x86_64 have an arch_align_stack().
5889 - * All other arches have "#define arch_align_stack(x) (x)"
5890 - * in their asm/system.h
5891 - * As this is included in UML from asm-um/system-generic.h,
5892 - * we can use it to behave as the subarch does.
5893 - */
5894 -#ifndef arch_align_stack
5895 -unsigned long arch_align_stack(unsigned long sp)
5896 -{
5897 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5898 - sp -= get_random_int() % 8192;
5899 - return sp & ~0xf;
5900 -}
5901 -#endif
5902 -
5903 unsigned long get_wchan(struct task_struct *p)
5904 {
5905 unsigned long stack_page, sp, ip;
5906 diff -urNp linux-2.6.32.42/arch/um/sys-i386/syscalls.c linux-2.6.32.42/arch/um/sys-i386/syscalls.c
5907 --- linux-2.6.32.42/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
5908 +++ linux-2.6.32.42/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
5909 @@ -11,6 +11,21 @@
5910 #include "asm/uaccess.h"
5911 #include "asm/unistd.h"
5912
5913 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5914 +{
5915 + unsigned long pax_task_size = TASK_SIZE;
5916 +
5917 +#ifdef CONFIG_PAX_SEGMEXEC
5918 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5919 + pax_task_size = SEGMEXEC_TASK_SIZE;
5920 +#endif
5921 +
5922 + if (len > pax_task_size || addr > pax_task_size - len)
5923 + return -EINVAL;
5924 +
5925 + return 0;
5926 +}
5927 +
5928 /*
5929 * Perform the select(nd, in, out, ex, tv) and mmap() system
5930 * calls. Linux/i386 didn't use to be able to handle more than
5931 diff -urNp linux-2.6.32.42/arch/x86/boot/bitops.h linux-2.6.32.42/arch/x86/boot/bitops.h
5932 --- linux-2.6.32.42/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
5933 +++ linux-2.6.32.42/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
5934 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5935 u8 v;
5936 const u32 *p = (const u32 *)addr;
5937
5938 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5939 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5940 return v;
5941 }
5942
5943 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5944
5945 static inline void set_bit(int nr, void *addr)
5946 {
5947 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5948 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5949 }
5950
5951 #endif /* BOOT_BITOPS_H */
5952 diff -urNp linux-2.6.32.42/arch/x86/boot/boot.h linux-2.6.32.42/arch/x86/boot/boot.h
5953 --- linux-2.6.32.42/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
5954 +++ linux-2.6.32.42/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
5955 @@ -82,7 +82,7 @@ static inline void io_delay(void)
5956 static inline u16 ds(void)
5957 {
5958 u16 seg;
5959 - asm("movw %%ds,%0" : "=rm" (seg));
5960 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5961 return seg;
5962 }
5963
5964 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
5965 static inline int memcmp(const void *s1, const void *s2, size_t len)
5966 {
5967 u8 diff;
5968 - asm("repe; cmpsb; setnz %0"
5969 + asm volatile("repe; cmpsb; setnz %0"
5970 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5971 return diff;
5972 }
5973 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/head_32.S linux-2.6.32.42/arch/x86/boot/compressed/head_32.S
5974 --- linux-2.6.32.42/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
5975 +++ linux-2.6.32.42/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
5976 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5977 notl %eax
5978 andl %eax, %ebx
5979 #else
5980 - movl $LOAD_PHYSICAL_ADDR, %ebx
5981 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5982 #endif
5983
5984 /* Target address to relocate to for decompression */
5985 @@ -149,7 +149,7 @@ relocated:
5986 * and where it was actually loaded.
5987 */
5988 movl %ebp, %ebx
5989 - subl $LOAD_PHYSICAL_ADDR, %ebx
5990 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5991 jz 2f /* Nothing to be done if loaded at compiled addr. */
5992 /*
5993 * Process relocations.
5994 @@ -157,8 +157,7 @@ relocated:
5995
5996 1: subl $4, %edi
5997 movl (%edi), %ecx
5998 - testl %ecx, %ecx
5999 - jz 2f
6000 + jecxz 2f
6001 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6002 jmp 1b
6003 2:
6004 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/head_64.S linux-2.6.32.42/arch/x86/boot/compressed/head_64.S
6005 --- linux-2.6.32.42/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6006 +++ linux-2.6.32.42/arch/x86/boot/compressed/head_64.S 2011-04-17 15:56:46.000000000 -0400
6007 @@ -91,7 +91,7 @@ ENTRY(startup_32)
6008 notl %eax
6009 andl %eax, %ebx
6010 #else
6011 - movl $LOAD_PHYSICAL_ADDR, %ebx
6012 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6013 #endif
6014
6015 /* Target address to relocate to for decompression */
6016 @@ -234,7 +234,7 @@ ENTRY(startup_64)
6017 notq %rax
6018 andq %rax, %rbp
6019 #else
6020 - movq $LOAD_PHYSICAL_ADDR, %rbp
6021 + movq $____LOAD_PHYSICAL_ADDR, %rbp
6022 #endif
6023
6024 /* Target address to relocate to for decompression */
6025 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/misc.c linux-2.6.32.42/arch/x86/boot/compressed/misc.c
6026 --- linux-2.6.32.42/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6027 +++ linux-2.6.32.42/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6028 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
6029 case PT_LOAD:
6030 #ifdef CONFIG_RELOCATABLE
6031 dest = output;
6032 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6033 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6034 #else
6035 dest = (void *)(phdr->p_paddr);
6036 #endif
6037 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6038 error("Destination address too large");
6039 #endif
6040 #ifndef CONFIG_RELOCATABLE
6041 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6042 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6043 error("Wrong destination address");
6044 #endif
6045
6046 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c
6047 --- linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6048 +++ linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6049 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6050
6051 offs = (olen > ilen) ? olen - ilen : 0;
6052 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6053 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6054 + offs += 64*1024; /* Add 64K bytes slack */
6055 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6056
6057 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6058 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/relocs.c linux-2.6.32.42/arch/x86/boot/compressed/relocs.c
6059 --- linux-2.6.32.42/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6060 +++ linux-2.6.32.42/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6061 @@ -10,8 +10,11 @@
6062 #define USE_BSD
6063 #include <endian.h>
6064
6065 +#include "../../../../include/linux/autoconf.h"
6066 +
6067 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6068 static Elf32_Ehdr ehdr;
6069 +static Elf32_Phdr *phdr;
6070 static unsigned long reloc_count, reloc_idx;
6071 static unsigned long *relocs;
6072
6073 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6074
6075 static int is_safe_abs_reloc(const char* sym_name)
6076 {
6077 - int i;
6078 + unsigned int i;
6079
6080 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6081 if (!strcmp(sym_name, safe_abs_relocs[i]))
6082 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6083 }
6084 }
6085
6086 +static void read_phdrs(FILE *fp)
6087 +{
6088 + unsigned int i;
6089 +
6090 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6091 + if (!phdr) {
6092 + die("Unable to allocate %d program headers\n",
6093 + ehdr.e_phnum);
6094 + }
6095 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6096 + die("Seek to %d failed: %s\n",
6097 + ehdr.e_phoff, strerror(errno));
6098 + }
6099 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6100 + die("Cannot read ELF program headers: %s\n",
6101 + strerror(errno));
6102 + }
6103 + for(i = 0; i < ehdr.e_phnum; i++) {
6104 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6105 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6106 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6107 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6108 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6109 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6110 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6111 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6112 + }
6113 +
6114 +}
6115 +
6116 static void read_shdrs(FILE *fp)
6117 {
6118 - int i;
6119 + unsigned int i;
6120 Elf32_Shdr shdr;
6121
6122 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6123 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6124
6125 static void read_strtabs(FILE *fp)
6126 {
6127 - int i;
6128 + unsigned int i;
6129 for (i = 0; i < ehdr.e_shnum; i++) {
6130 struct section *sec = &secs[i];
6131 if (sec->shdr.sh_type != SHT_STRTAB) {
6132 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6133
6134 static void read_symtabs(FILE *fp)
6135 {
6136 - int i,j;
6137 + unsigned int i,j;
6138 for (i = 0; i < ehdr.e_shnum; i++) {
6139 struct section *sec = &secs[i];
6140 if (sec->shdr.sh_type != SHT_SYMTAB) {
6141 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6142
6143 static void read_relocs(FILE *fp)
6144 {
6145 - int i,j;
6146 + unsigned int i,j;
6147 + uint32_t base;
6148 +
6149 for (i = 0; i < ehdr.e_shnum; i++) {
6150 struct section *sec = &secs[i];
6151 if (sec->shdr.sh_type != SHT_REL) {
6152 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6153 die("Cannot read symbol table: %s\n",
6154 strerror(errno));
6155 }
6156 + base = 0;
6157 + for (j = 0; j < ehdr.e_phnum; j++) {
6158 + if (phdr[j].p_type != PT_LOAD )
6159 + continue;
6160 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6161 + continue;
6162 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6163 + break;
6164 + }
6165 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6166 Elf32_Rel *rel = &sec->reltab[j];
6167 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6168 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6169 rel->r_info = elf32_to_cpu(rel->r_info);
6170 }
6171 }
6172 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6173
6174 static void print_absolute_symbols(void)
6175 {
6176 - int i;
6177 + unsigned int i;
6178 printf("Absolute symbols\n");
6179 printf(" Num: Value Size Type Bind Visibility Name\n");
6180 for (i = 0; i < ehdr.e_shnum; i++) {
6181 struct section *sec = &secs[i];
6182 char *sym_strtab;
6183 Elf32_Sym *sh_symtab;
6184 - int j;
6185 + unsigned int j;
6186
6187 if (sec->shdr.sh_type != SHT_SYMTAB) {
6188 continue;
6189 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6190
6191 static void print_absolute_relocs(void)
6192 {
6193 - int i, printed = 0;
6194 + unsigned int i, printed = 0;
6195
6196 for (i = 0; i < ehdr.e_shnum; i++) {
6197 struct section *sec = &secs[i];
6198 struct section *sec_applies, *sec_symtab;
6199 char *sym_strtab;
6200 Elf32_Sym *sh_symtab;
6201 - int j;
6202 + unsigned int j;
6203 if (sec->shdr.sh_type != SHT_REL) {
6204 continue;
6205 }
6206 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6207
6208 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6209 {
6210 - int i;
6211 + unsigned int i;
6212 /* Walk through the relocations */
6213 for (i = 0; i < ehdr.e_shnum; i++) {
6214 char *sym_strtab;
6215 Elf32_Sym *sh_symtab;
6216 struct section *sec_applies, *sec_symtab;
6217 - int j;
6218 + unsigned int j;
6219 struct section *sec = &secs[i];
6220
6221 if (sec->shdr.sh_type != SHT_REL) {
6222 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6223 if (sym->st_shndx == SHN_ABS) {
6224 continue;
6225 }
6226 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6227 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6228 + continue;
6229 +
6230 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6231 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6232 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6233 + continue;
6234 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6235 + continue;
6236 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6237 + continue;
6238 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6239 + continue;
6240 +#endif
6241 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6242 /*
6243 * NONE can be ignored and PC relative

6244 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6245
6246 static void emit_relocs(int as_text)
6247 {
6248 - int i;
6249 + unsigned int i;
6250 /* Count how many relocations I have and allocate space for them. */
6251 reloc_count = 0;
6252 walk_relocs(count_reloc);
6253 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
6254 fname, strerror(errno));
6255 }
6256 read_ehdr(fp);
6257 + read_phdrs(fp);
6258 read_shdrs(fp);
6259 read_strtabs(fp);
6260 read_symtabs(fp);
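
Note (commentary, not part of the patch): the relocs.c changes read the program headers so that each relocation's r_offset, which the unpatched tool leaves as a link-time virtual address, gets rebased by CONFIG_PAGE_OFFSET + p_paddr - p_vaddr of the PT_LOAD segment containing the section, presumably so the later KERNEXEC-related filtering and the emitted relocation table operate on the kernel's actual mapped addresses. A hedged sketch of that address arithmetic, using made-up segment values:

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative values only; the real ones come from the vmlinux program headers */
    #define DEMO_PAGE_OFFSET UINT32_C(0xC0000000)   /* stands in for CONFIG_PAGE_OFFSET */

    int main(void)
    {
        uint32_t p_vaddr  = UINT32_C(0xC1000000);   /* segment link-time virtual address */
        uint32_t p_paddr  = UINT32_C(0x02000000);   /* segment physical load address */
        uint32_t r_offset = UINT32_C(0xC1234567);   /* a relocation inside that segment */

        /* same rebasing the patched read_relocs() applies per PT_LOAD segment */
        uint32_t base = DEMO_PAGE_OFFSET + p_paddr - p_vaddr;
        printf("base 0x%08x, rebased r_offset 0x%08x\n", base, r_offset + base);
        return 0;
    }
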
6261 diff -urNp linux-2.6.32.42/arch/x86/boot/cpucheck.c linux-2.6.32.42/arch/x86/boot/cpucheck.c
6262 --- linux-2.6.32.42/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6263 +++ linux-2.6.32.42/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6264 @@ -74,7 +74,7 @@ static int has_fpu(void)
6265 u16 fcw = -1, fsw = -1;
6266 u32 cr0;
6267
6268 - asm("movl %%cr0,%0" : "=r" (cr0));
6269 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6270 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6271 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6272 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6273 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6274 {
6275 u32 f0, f1;
6276
6277 - asm("pushfl ; "
6278 + asm volatile("pushfl ; "
6279 "pushfl ; "
6280 "popl %0 ; "
6281 "movl %0,%1 ; "
6282 @@ -115,7 +115,7 @@ static void get_flags(void)
6283 set_bit(X86_FEATURE_FPU, cpu.flags);
6284
6285 if (has_eflag(X86_EFLAGS_ID)) {
6286 - asm("cpuid"
6287 + asm volatile("cpuid"
6288 : "=a" (max_intel_level),
6289 "=b" (cpu_vendor[0]),
6290 "=d" (cpu_vendor[1]),
6291 @@ -124,7 +124,7 @@ static void get_flags(void)
6292
6293 if (max_intel_level >= 0x00000001 &&
6294 max_intel_level <= 0x0000ffff) {
6295 - asm("cpuid"
6296 + asm volatile("cpuid"
6297 : "=a" (tfms),
6298 "=c" (cpu.flags[4]),
6299 "=d" (cpu.flags[0])
6300 @@ -136,7 +136,7 @@ static void get_flags(void)
6301 cpu.model += ((tfms >> 16) & 0xf) << 4;
6302 }
6303
6304 - asm("cpuid"
6305 + asm volatile("cpuid"
6306 : "=a" (max_amd_level)
6307 : "a" (0x80000000)
6308 : "ebx", "ecx", "edx");
6309 @@ -144,7 +144,7 @@ static void get_flags(void)
6310 if (max_amd_level >= 0x80000001 &&
6311 max_amd_level <= 0x8000ffff) {
6312 u32 eax = 0x80000001;
6313 - asm("cpuid"
6314 + asm volatile("cpuid"
6315 : "+a" (eax),
6316 "=c" (cpu.flags[6]),
6317 "=d" (cpu.flags[1])
6318 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6319 u32 ecx = MSR_K7_HWCR;
6320 u32 eax, edx;
6321
6322 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6323 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6324 eax &= ~(1 << 15);
6325 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6326 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6327
6328 get_flags(); /* Make sure it really did something */
6329 err = check_flags();
6330 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6331 u32 ecx = MSR_VIA_FCR;
6332 u32 eax, edx;
6333
6334 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6335 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6336 eax |= (1<<1)|(1<<7);
6337 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6338 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6339
6340 set_bit(X86_FEATURE_CX8, cpu.flags);
6341 err = check_flags();
6342 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6343 u32 eax, edx;
6344 u32 level = 1;
6345
6346 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6347 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6348 - asm("cpuid"
6349 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6350 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6351 + asm volatile("cpuid"
6352 : "+a" (level), "=d" (cpu.flags[0])
6353 : : "ecx", "ebx");
6354 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6355 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6356
6357 err = check_flags();
6358 }
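
Note (commentary, not part of the patch): the boot-code hunks above add the volatile qualifier to asm statements that read CR0, EFLAGS, CPUID leaves and MSRs. Without volatile, GCC treats an asm with output operands as having no side effects beyond producing those outputs, so it may delete the statement when the outputs look unused, merge duplicates, or hoist it out of a loop; volatile forces the instruction to execute exactly where written. A minimal userspace illustration, using rdtsc as an unprivileged stand-in for the rdmsr/cpuid reads patched above:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t lo, hi;

        /* volatile: the read must happen every time it appears, even if the
           compiler believes nothing relevant changed between executions */
        asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
        printf("tsc lo=%u hi=%u\n", lo, hi);
        return 0;
    }
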
6359 diff -urNp linux-2.6.32.42/arch/x86/boot/header.S linux-2.6.32.42/arch/x86/boot/header.S
6360 --- linux-2.6.32.42/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6361 +++ linux-2.6.32.42/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6362 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6363 # single linked list of
6364 # struct setup_data
6365
6366 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6367 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6368
6369 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6370 #define VO_INIT_SIZE (VO__end - VO__text)
6371 diff -urNp linux-2.6.32.42/arch/x86/boot/memory.c linux-2.6.32.42/arch/x86/boot/memory.c
6372 --- linux-2.6.32.42/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6373 +++ linux-2.6.32.42/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6374 @@ -19,7 +19,7 @@
6375
6376 static int detect_memory_e820(void)
6377 {
6378 - int count = 0;
6379 + unsigned int count = 0;
6380 struct biosregs ireg, oreg;
6381 struct e820entry *desc = boot_params.e820_map;
6382 static struct e820entry buf; /* static so it is zeroed */
6383 diff -urNp linux-2.6.32.42/arch/x86/boot/video.c linux-2.6.32.42/arch/x86/boot/video.c
6384 --- linux-2.6.32.42/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6385 +++ linux-2.6.32.42/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6386 @@ -90,7 +90,7 @@ static void store_mode_params(void)
6387 static unsigned int get_entry(void)
6388 {
6389 char entry_buf[4];
6390 - int i, len = 0;
6391 + unsigned int i, len = 0;
6392 int key;
6393 unsigned int v;
6394
6395 diff -urNp linux-2.6.32.42/arch/x86/boot/video-vesa.c linux-2.6.32.42/arch/x86/boot/video-vesa.c
6396 --- linux-2.6.32.42/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6397 +++ linux-2.6.32.42/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6398 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6399
6400 boot_params.screen_info.vesapm_seg = oreg.es;
6401 boot_params.screen_info.vesapm_off = oreg.di;
6402 + boot_params.screen_info.vesapm_size = oreg.cx;
6403 }
6404
6405 /*
6406 diff -urNp linux-2.6.32.42/arch/x86/ia32/ia32_aout.c linux-2.6.32.42/arch/x86/ia32/ia32_aout.c
6407 --- linux-2.6.32.42/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6408 +++ linux-2.6.32.42/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6409 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6410 unsigned long dump_start, dump_size;
6411 struct user32 dump;
6412
6413 + memset(&dump, 0, sizeof(dump));
6414 +
6415 fs = get_fs();
6416 set_fs(KERNEL_DS);
6417 has_dumped = 1;
6418 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6419 dump_size = dump.u_ssize << PAGE_SHIFT;
6420 DUMP_WRITE(dump_start, dump_size);
6421 }
6422 - /*
6423 - * Finally dump the task struct. Not be used by gdb, but
6424 - * could be useful
6425 - */
6426 - set_fs(KERNEL_DS);
6427 - DUMP_WRITE(current, sizeof(*current));
6428 end_coredump:
6429 set_fs(fs);
6430 return has_dumped;
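
Note (commentary, not part of the patch): aout_core_dump() fills struct user32 field by field and then writes the whole struct to the core file, so any padding or unwritten member would otherwise carry stale kernel stack bytes into a user-readable file; the added memset() zeroes the struct first, and the removed DUMP_WRITE(current, ...) stops dumping the task_struct itself. The same zero-before-copy pattern, sketched in userspace with a hypothetical record type:

    #include <stdio.h>
    #include <string.h>

    /* hypothetical record with compiler-inserted padding between the members */
    struct demo_record {
        char  tag;      /* 1 byte, typically followed by 3 padding bytes */
        int   value;
    };

    static void emit(const struct demo_record *r)
    {
        /* stand-in for DUMP_WRITE(): copies the raw bytes, padding included */
        fwrite(r, sizeof(*r), 1, stdout);
    }

    int main(void)
    {
        struct demo_record r;

        memset(&r, 0, sizeof(r));   /* without this, the padding bytes are whatever
                                       happened to be on the stack */
        r.tag = 'A';
        r.value = 42;
        emit(&r);
        return 0;
    }
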
6431 diff -urNp linux-2.6.32.42/arch/x86/ia32/ia32entry.S linux-2.6.32.42/arch/x86/ia32/ia32entry.S
6432 --- linux-2.6.32.42/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6433 +++ linux-2.6.32.42/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6434 @@ -13,6 +13,7 @@
6435 #include <asm/thread_info.h>
6436 #include <asm/segment.h>
6437 #include <asm/irqflags.h>
6438 +#include <asm/pgtable.h>
6439 #include <linux/linkage.h>
6440
6441 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6442 @@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6443 ENDPROC(native_irq_enable_sysexit)
6444 #endif
6445
6446 + .macro pax_enter_kernel_user
6447 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6448 + call pax_enter_kernel_user
6449 +#endif
6450 + .endm
6451 +
6452 + .macro pax_exit_kernel_user
6453 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6454 + call pax_exit_kernel_user
6455 +#endif
6456 +#ifdef CONFIG_PAX_RANDKSTACK
6457 + pushq %rax
6458 + call pax_randomize_kstack
6459 + popq %rax
6460 +#endif
6461 + pax_erase_kstack
6462 + .endm
6463 +
6464 +.macro pax_erase_kstack
6465 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6466 + call pax_erase_kstack
6467 +#endif
6468 +.endm
6469 +
6470 /*
6471 * 32bit SYSENTER instruction entry.
6472 *
6473 @@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6474 CFI_REGISTER rsp,rbp
6475 SWAPGS_UNSAFE_STACK
6476 movq PER_CPU_VAR(kernel_stack), %rsp
6477 - addq $(KERNEL_STACK_OFFSET),%rsp
6478 + pax_enter_kernel_user
6479 /*
6480 * No need to follow this irqs on/off section: the syscall
6481 * disabled irqs, here we enable it straight after entry:
6482 @@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6483 pushfq
6484 CFI_ADJUST_CFA_OFFSET 8
6485 /*CFI_REL_OFFSET rflags,0*/
6486 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6487 + GET_THREAD_INFO(%r10)
6488 + movl TI_sysenter_return(%r10), %r10d
6489 CFI_REGISTER rip,r10
6490 pushq $__USER32_CS
6491 CFI_ADJUST_CFA_OFFSET 8
6492 @@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6493 SAVE_ARGS 0,0,1
6494 /* no need to do an access_ok check here because rbp has been
6495 32bit zero extended */
6496 +
6497 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6498 + mov $PAX_USER_SHADOW_BASE,%r10
6499 + add %r10,%rbp
6500 +#endif
6501 +
6502 1: movl (%rbp),%ebp
6503 .section __ex_table,"a"
6504 .quad 1b,ia32_badarg
6505 @@ -172,6 +204,7 @@ sysenter_dispatch:
6506 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6507 jnz sysexit_audit
6508 sysexit_from_sys_call:
6509 + pax_exit_kernel_user
6510 andl $~TS_COMPAT,TI_status(%r10)
6511 /* clear IF, that popfq doesn't enable interrupts early */
6512 andl $~0x200,EFLAGS-R11(%rsp)
6513 @@ -200,6 +233,9 @@ sysexit_from_sys_call:
6514 movl %eax,%esi /* 2nd arg: syscall number */
6515 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6516 call audit_syscall_entry
6517 +
6518 + pax_erase_kstack
6519 +
6520 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6521 cmpq $(IA32_NR_syscalls-1),%rax
6522 ja ia32_badsys
6523 @@ -252,6 +288,9 @@ sysenter_tracesys:
6524 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6525 movq %rsp,%rdi /* &pt_regs -> arg1 */
6526 call syscall_trace_enter
6527 +
6528 + pax_erase_kstack
6529 +
6530 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6531 RESTORE_REST
6532 cmpq $(IA32_NR_syscalls-1),%rax
6533 @@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6534 ENTRY(ia32_cstar_target)
6535 CFI_STARTPROC32 simple
6536 CFI_SIGNAL_FRAME
6537 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6538 + CFI_DEF_CFA rsp,0
6539 CFI_REGISTER rip,rcx
6540 /*CFI_REGISTER rflags,r11*/
6541 SWAPGS_UNSAFE_STACK
6542 movl %esp,%r8d
6543 CFI_REGISTER rsp,r8
6544 movq PER_CPU_VAR(kernel_stack),%rsp
6545 +
6546 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6547 + pax_enter_kernel_user
6548 +#endif
6549 +
6550 /*
6551 * No need to follow this irqs on/off section: the syscall
6552 * disabled irqs and here we enable it straight after entry:
6553 */
6554 ENABLE_INTERRUPTS(CLBR_NONE)
6555 - SAVE_ARGS 8,1,1
6556 + SAVE_ARGS 8*6,1,1
6557 movl %eax,%eax /* zero extension */
6558 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6559 movq %rcx,RIP-ARGOFFSET(%rsp)
6560 @@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6561 /* no need to do an access_ok check here because r8 has been
6562 32bit zero extended */
6563 /* hardware stack frame is complete now */
6564 +
6565 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6566 + mov $PAX_USER_SHADOW_BASE,%r10
6567 + add %r10,%r8
6568 +#endif
6569 +
6570 1: movl (%r8),%r9d
6571 .section __ex_table,"a"
6572 .quad 1b,ia32_badarg
6573 @@ -333,6 +383,7 @@ cstar_dispatch:
6574 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6575 jnz sysretl_audit
6576 sysretl_from_sys_call:
6577 + pax_exit_kernel_user
6578 andl $~TS_COMPAT,TI_status(%r10)
6579 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6580 movl RIP-ARGOFFSET(%rsp),%ecx
6581 @@ -370,6 +421,9 @@ cstar_tracesys:
6582 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6583 movq %rsp,%rdi /* &pt_regs -> arg1 */
6584 call syscall_trace_enter
6585 +
6586 + pax_erase_kstack
6587 +
6588 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6589 RESTORE_REST
6590 xchgl %ebp,%r9d
6591 @@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6592 CFI_REL_OFFSET rip,RIP-RIP
6593 PARAVIRT_ADJUST_EXCEPTION_FRAME
6594 SWAPGS
6595 + pax_enter_kernel_user
6596 /*
6597 * No need to follow this irqs on/off section: the syscall
6598 * disabled irqs and here we enable it straight after entry:
6599 @@ -448,6 +503,9 @@ ia32_tracesys:
6600 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6601 movq %rsp,%rdi /* &pt_regs -> arg1 */
6602 call syscall_trace_enter
6603 +
6604 + pax_erase_kstack
6605 +
6606 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6607 RESTORE_REST
6608 cmpq $(IA32_NR_syscalls-1),%rax
6609 diff -urNp linux-2.6.32.42/arch/x86/ia32/ia32_signal.c linux-2.6.32.42/arch/x86/ia32/ia32_signal.c
6610 --- linux-2.6.32.42/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6611 +++ linux-2.6.32.42/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6612 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6613 sp -= frame_size;
6614 /* Align the stack pointer according to the i386 ABI,
6615 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6616 - sp = ((sp + 4) & -16ul) - 4;
6617 + sp = ((sp - 12) & -16ul) - 4;
6618 return (void __user *) sp;
6619 }
6620
6621 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6622 * These are actually not used anymore, but left because some
6623 * gdb versions depend on them as a marker.
6624 */
6625 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6626 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6627 } put_user_catch(err);
6628
6629 if (err)
6630 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6631 0xb8,
6632 __NR_ia32_rt_sigreturn,
6633 0x80cd,
6634 - 0,
6635 + 0
6636 };
6637
6638 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6639 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6640
6641 if (ka->sa.sa_flags & SA_RESTORER)
6642 restorer = ka->sa.sa_restorer;
6643 + else if (current->mm->context.vdso)
6644 + /* Return stub is in 32bit vsyscall page */
6645 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6646 else
6647 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6648 - rt_sigreturn);
6649 + restorer = &frame->retcode;
6650 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6651
6652 /*
6653 * Not actually used anymore, but left because some gdb
6654 * versions need it.
6655 */
6656 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6657 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6658 } put_user_catch(err);
6659
6660 if (err)
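
Note (commentary, not part of the patch): in get_sigframe() both the old and the new expression keep the i386 ABI requirement that ((sp + 4) & 15) == 0 on function entry; the patched form simply lands 16 bytes lower for any input, since (x - 12) & -16 equals ((x + 4) & -16) - 16. A quick arithmetic check over one 16-byte period of stack-pointer values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long sp;

        for (sp = 0x1000; sp < 0x1010; sp++) {
            unsigned long old_sp = ((sp + 4) & -16UL) - 4;    /* pre-patch expression */
            unsigned long new_sp = ((sp - 12) & -16UL) - 4;   /* patched expression   */

            /* both satisfy the ABI rule, and the patched one is 16 bytes lower */
            printf("sp=%#lx old=%#lx new=%#lx ok=%d\n", sp, old_sp, new_sp,
                   ((old_sp + 4) & 15) == 0 && ((new_sp + 4) & 15) == 0 &&
                   old_sp - new_sp == 16);
        }
        return 0;
    }
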
6661 diff -urNp linux-2.6.32.42/arch/x86/include/asm/alternative.h linux-2.6.32.42/arch/x86/include/asm/alternative.h
6662 --- linux-2.6.32.42/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6663 +++ linux-2.6.32.42/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6664 @@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6665 " .byte 662b-661b\n" /* sourcelen */ \
6666 " .byte 664f-663f\n" /* replacementlen */ \
6667 ".previous\n" \
6668 - ".section .altinstr_replacement, \"ax\"\n" \
6669 + ".section .altinstr_replacement, \"a\"\n" \
6670 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6671 ".previous"
6672
6673 diff -urNp linux-2.6.32.42/arch/x86/include/asm/apm.h linux-2.6.32.42/arch/x86/include/asm/apm.h
6674 --- linux-2.6.32.42/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
6675 +++ linux-2.6.32.42/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
6676 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6677 __asm__ __volatile__(APM_DO_ZERO_SEGS
6678 "pushl %%edi\n\t"
6679 "pushl %%ebp\n\t"
6680 - "lcall *%%cs:apm_bios_entry\n\t"
6681 + "lcall *%%ss:apm_bios_entry\n\t"
6682 "setc %%al\n\t"
6683 "popl %%ebp\n\t"
6684 "popl %%edi\n\t"
6685 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6686 __asm__ __volatile__(APM_DO_ZERO_SEGS
6687 "pushl %%edi\n\t"
6688 "pushl %%ebp\n\t"
6689 - "lcall *%%cs:apm_bios_entry\n\t"
6690 + "lcall *%%ss:apm_bios_entry\n\t"
6691 "setc %%bl\n\t"
6692 "popl %%ebp\n\t"
6693 "popl %%edi\n\t"
6694 diff -urNp linux-2.6.32.42/arch/x86/include/asm/atomic_32.h linux-2.6.32.42/arch/x86/include/asm/atomic_32.h
6695 --- linux-2.6.32.42/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
6696 +++ linux-2.6.32.42/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
6697 @@ -25,6 +25,17 @@ static inline int atomic_read(const atom
6698 }
6699
6700 /**
6701 + * atomic_read_unchecked - read atomic variable
6702 + * @v: pointer of type atomic_unchecked_t
6703 + *
6704 + * Atomically reads the value of @v.
6705 + */
6706 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6707 +{
6708 + return v->counter;
6709 +}
6710 +
6711 +/**
6712 * atomic_set - set atomic variable
6713 * @v: pointer of type atomic_t
6714 * @i: required value
6715 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
6716 }
6717
6718 /**
6719 + * atomic_set_unchecked - set atomic variable
6720 + * @v: pointer of type atomic_unchecked_t
6721 + * @i: required value
6722 + *
6723 + * Atomically sets the value of @v to @i.
6724 + */
6725 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6726 +{
6727 + v->counter = i;
6728 +}
6729 +
6730 +/**
6731 * atomic_add - add integer to atomic variable
6732 * @i: integer value to add
6733 * @v: pointer of type atomic_t
6734 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
6735 */
6736 static inline void atomic_add(int i, atomic_t *v)
6737 {
6738 - asm volatile(LOCK_PREFIX "addl %1,%0"
6739 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6740 +
6741 +#ifdef CONFIG_PAX_REFCOUNT
6742 + "jno 0f\n"
6743 + LOCK_PREFIX "subl %1,%0\n"
6744 + "int $4\n0:\n"
6745 + _ASM_EXTABLE(0b, 0b)
6746 +#endif
6747 +
6748 + : "+m" (v->counter)
6749 + : "ir" (i));
6750 +}
6751 +
6752 +/**
6753 + * atomic_add_unchecked - add integer to atomic variable
6754 + * @i: integer value to add
6755 + * @v: pointer of type atomic_unchecked_t
6756 + *
6757 + * Atomically adds @i to @v.
6758 + */
6759 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6760 +{
6761 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6762 : "+m" (v->counter)
6763 : "ir" (i));
6764 }
6765 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
6766 */
6767 static inline void atomic_sub(int i, atomic_t *v)
6768 {
6769 - asm volatile(LOCK_PREFIX "subl %1,%0"
6770 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6771 +
6772 +#ifdef CONFIG_PAX_REFCOUNT
6773 + "jno 0f\n"
6774 + LOCK_PREFIX "addl %1,%0\n"
6775 + "int $4\n0:\n"
6776 + _ASM_EXTABLE(0b, 0b)
6777 +#endif
6778 +
6779 + : "+m" (v->counter)
6780 + : "ir" (i));
6781 +}
6782 +
6783 +/**
6784 + * atomic_sub_unchecked - subtract integer from atomic variable
6785 + * @i: integer value to subtract
6786 + * @v: pointer of type atomic_unchecked_t
6787 + *
6788 + * Atomically subtracts @i from @v.
6789 + */
6790 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6791 +{
6792 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6793 : "+m" (v->counter)
6794 : "ir" (i));
6795 }
6796 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
6797 {
6798 unsigned char c;
6799
6800 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6801 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6802 +
6803 +#ifdef CONFIG_PAX_REFCOUNT
6804 + "jno 0f\n"
6805 + LOCK_PREFIX "addl %2,%0\n"
6806 + "int $4\n0:\n"
6807 + _ASM_EXTABLE(0b, 0b)
6808 +#endif
6809 +
6810 + "sete %1\n"
6811 : "+m" (v->counter), "=qm" (c)
6812 : "ir" (i) : "memory");
6813 return c;
6814 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
6815 */
6816 static inline void atomic_inc(atomic_t *v)
6817 {
6818 - asm volatile(LOCK_PREFIX "incl %0"
6819 + asm volatile(LOCK_PREFIX "incl %0\n"
6820 +
6821 +#ifdef CONFIG_PAX_REFCOUNT
6822 + "jno 0f\n"
6823 + LOCK_PREFIX "decl %0\n"
6824 + "int $4\n0:\n"
6825 + _ASM_EXTABLE(0b, 0b)
6826 +#endif
6827 +
6828 + : "+m" (v->counter));
6829 +}
6830 +
6831 +/**
6832 + * atomic_inc_unchecked - increment atomic variable
6833 + * @v: pointer of type atomic_unchecked_t
6834 + *
6835 + * Atomically increments @v by 1.
6836 + */
6837 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6838 +{
6839 + asm volatile(LOCK_PREFIX "incl %0\n"
6840 : "+m" (v->counter));
6841 }
6842
6843 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
6844 */
6845 static inline void atomic_dec(atomic_t *v)
6846 {
6847 - asm volatile(LOCK_PREFIX "decl %0"
6848 + asm volatile(LOCK_PREFIX "decl %0\n"
6849 +
6850 +#ifdef CONFIG_PAX_REFCOUNT
6851 + "jno 0f\n"
6852 + LOCK_PREFIX "incl %0\n"
6853 + "int $4\n0:\n"
6854 + _ASM_EXTABLE(0b, 0b)
6855 +#endif
6856 +
6857 + : "+m" (v->counter));
6858 +}
6859 +
6860 +/**
6861 + * atomic_dec_unchecked - decrement atomic variable
6862 + * @v: pointer of type atomic_unchecked_t
6863 + *
6864 + * Atomically decrements @v by 1.
6865 + */
6866 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6867 +{
6868 + asm volatile(LOCK_PREFIX "decl %0\n"
6869 : "+m" (v->counter));
6870 }
6871
6872 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
6873 {
6874 unsigned char c;
6875
6876 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6877 + asm volatile(LOCK_PREFIX "decl %0\n"
6878 +
6879 +#ifdef CONFIG_PAX_REFCOUNT
6880 + "jno 0f\n"
6881 + LOCK_PREFIX "incl %0\n"
6882 + "int $4\n0:\n"
6883 + _ASM_EXTABLE(0b, 0b)
6884 +#endif
6885 +
6886 + "sete %1\n"
6887 : "+m" (v->counter), "=qm" (c)
6888 : : "memory");
6889 return c != 0;
6890 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
6891 {
6892 unsigned char c;
6893
6894 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6895 + asm volatile(LOCK_PREFIX "incl %0\n"
6896 +
6897 +#ifdef CONFIG_PAX_REFCOUNT
6898 + "jno 0f\n"
6899 + LOCK_PREFIX "decl %0\n"
6900 + "into\n0:\n"
6901 + _ASM_EXTABLE(0b, 0b)
6902 +#endif
6903 +
6904 + "sete %1\n"
6905 + : "+m" (v->counter), "=qm" (c)
6906 + : : "memory");
6907 + return c != 0;
6908 +}
6909 +
6910 +/**
6911 + * atomic_inc_and_test_unchecked - increment and test
6912 + * @v: pointer of type atomic_unchecked_t
6913 + *
6914 + * Atomically increments @v by 1
6915 + * and returns true if the result is zero, or false for all
6916 + * other cases.
6917 + */
6918 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6919 +{
6920 + unsigned char c;
6921 +
6922 + asm volatile(LOCK_PREFIX "incl %0\n"
6923 + "sete %1\n"
6924 : "+m" (v->counter), "=qm" (c)
6925 : : "memory");
6926 return c != 0;
6927 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
6928 {
6929 unsigned char c;
6930
6931 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6932 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6933 +
6934 +#ifdef CONFIG_PAX_REFCOUNT
6935 + "jno 0f\n"
6936 + LOCK_PREFIX "subl %2,%0\n"
6937 + "int $4\n0:\n"
6938 + _ASM_EXTABLE(0b, 0b)
6939 +#endif
6940 +
6941 + "sets %1\n"
6942 : "+m" (v->counter), "=qm" (c)
6943 : "ir" (i) : "memory");
6944 return c;
6945 @@ -179,6 +341,46 @@ static inline int atomic_add_return(int
6946 #endif
6947 /* Modern 486+ processor */
6948 __i = i;
6949 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6950 +
6951 +#ifdef CONFIG_PAX_REFCOUNT
6952 + "jno 0f\n"
6953 + "movl %0, %1\n"
6954 + "int $4\n0:\n"
6955 + _ASM_EXTABLE(0b, 0b)
6956 +#endif
6957 +
6958 + : "+r" (i), "+m" (v->counter)
6959 + : : "memory");
6960 + return i + __i;
6961 +
6962 +#ifdef CONFIG_M386
6963 +no_xadd: /* Legacy 386 processor */
6964 + local_irq_save(flags);
6965 + __i = atomic_read(v);
6966 + atomic_set(v, i + __i);
6967 + local_irq_restore(flags);
6968 + return i + __i;
6969 +#endif
6970 +}
6971 +
6972 +/**
6973 + * atomic_add_return_unchecked - add integer and return
6974 + * @v: pointer of type atomic_unchecked_t
6975 + * @i: integer value to add
6976 + *
6977 + * Atomically adds @i to @v and returns @i + @v
6978 + */
6979 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6980 +{
6981 + int __i;
6982 +#ifdef CONFIG_M386
6983 + unsigned long flags;
6984 + if (unlikely(boot_cpu_data.x86 <= 3))
6985 + goto no_xadd;
6986 +#endif
6987 + /* Modern 486+ processor */
6988 + __i = i;
6989 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6990 : "+r" (i), "+m" (v->counter)
6991 : : "memory");
6992 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
6993 return cmpxchg(&v->counter, old, new);
6994 }
6995
6996 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6997 +{
6998 + return cmpxchg(&v->counter, old, new);
6999 +}
7000 +
7001 static inline int atomic_xchg(atomic_t *v, int new)
7002 {
7003 return xchg(&v->counter, new);
7004 }
7005
7006 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7007 +{
7008 + return xchg(&v->counter, new);
7009 +}
7010 +
7011 /**
7012 * atomic_add_unless - add unless the number is already a given value
7013 * @v: pointer of type atomic_t
7014 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7015 */
7016 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7017 {
7018 - int c, old;
7019 + int c, old, new;
7020 c = atomic_read(v);
7021 for (;;) {
7022 - if (unlikely(c == (u)))
7023 + if (unlikely(c == u))
7024 break;
7025 - old = atomic_cmpxchg((v), c, c + (a));
7026 +
7027 + asm volatile("addl %2,%0\n"
7028 +
7029 +#ifdef CONFIG_PAX_REFCOUNT
7030 + "jno 0f\n"
7031 + "subl %2,%0\n"
7032 + "int $4\n0:\n"
7033 + _ASM_EXTABLE(0b, 0b)
7034 +#endif
7035 +
7036 + : "=r" (new)
7037 + : "0" (c), "ir" (a));
7038 +
7039 + old = atomic_cmpxchg(v, c, new);
7040 if (likely(old == c))
7041 break;
7042 c = old;
7043 }
7044 - return c != (u);
7045 + return c != u;
7046 }
7047
7048 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7049
7050 #define atomic_inc_return(v) (atomic_add_return(1, v))
7051 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7052 +{
7053 + return atomic_add_return_unchecked(1, v);
7054 +}
7055 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7056
7057 /* These are x86-specific, used by some header files */
7058 @@ -266,9 +495,18 @@ typedef struct {
7059 u64 __aligned(8) counter;
7060 } atomic64_t;
7061
7062 +#ifdef CONFIG_PAX_REFCOUNT
7063 +typedef struct {
7064 + u64 __aligned(8) counter;
7065 +} atomic64_unchecked_t;
7066 +#else
7067 +typedef atomic64_t atomic64_unchecked_t;
7068 +#endif
7069 +
7070 #define ATOMIC64_INIT(val) { (val) }
7071
7072 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7073 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7074
7075 /**
7076 * atomic64_xchg - xchg atomic64 variable
7077 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7078 * the old value.
7079 */
7080 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7081 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7082
7083 /**
7084 * atomic64_set - set atomic64 variable
7085 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7086 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7087
7088 /**
7089 + * atomic64_set_unchecked - set atomic64 variable
7090 + * @ptr: pointer to type atomic64_unchecked_t
7091 + * @new_val: value to assign
7092 + *
7093 + * Atomically sets the value of @ptr to @new_val.
7094 + */
7095 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7096 +
7097 +/**
7098 * atomic64_read - read atomic64 variable
7099 * @ptr: pointer to type atomic64_t
7100 *
7101 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7102 return res;
7103 }
7104
7105 -extern u64 atomic64_read(atomic64_t *ptr);
7106 +/**
7107 + * atomic64_read_unchecked - read atomic64 variable
7108 + * @ptr: pointer to type atomic64_unchecked_t
7109 + *
7110 + * Atomically reads the value of @ptr and returns it.
7111 + */
7112 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7113 +{
7114 + u64 res;
7115 +
7116 + /*
7117 + * Note, we inline this atomic64_unchecked_t primitive because
7118 + * it only clobbers EAX/EDX and leaves the others
7119 + * untouched. We also (somewhat subtly) rely on the
7120 + * fact that cmpxchg8b returns the current 64-bit value
7121 + * of the memory location we are touching:
7122 + */
7123 + asm volatile(
7124 + "mov %%ebx, %%eax\n\t"
7125 + "mov %%ecx, %%edx\n\t"
7126 + LOCK_PREFIX "cmpxchg8b %1\n"
7127 + : "=&A" (res)
7128 + : "m" (*ptr)
7129 + );
7130 +
7131 + return res;
7132 +}
7133
7134 /**
7135 * atomic64_add_return - add and return
7136 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7137 * Other variants with different arithmetic operators:
7138 */
7139 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7140 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7141 extern u64 atomic64_inc_return(atomic64_t *ptr);
7142 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7143 extern u64 atomic64_dec_return(atomic64_t *ptr);
7144 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7145
7146 /**
7147 * atomic64_add - add integer to atomic64 variable
7148 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7149 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7150
7151 /**
7152 + * atomic64_add_unchecked - add integer to atomic64 variable
7153 + * @delta: integer value to add
7154 + * @ptr: pointer to type atomic64_unchecked_t
7155 + *
7156 + * Atomically adds @delta to @ptr.
7157 + */
7158 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7159 +
7160 +/**
7161 * atomic64_sub - subtract the atomic64 variable
7162 * @delta: integer value to subtract
7163 * @ptr: pointer to type atomic64_t
7164 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7165 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7166
7167 /**
7168 + * atomic64_sub_unchecked - subtract the atomic64 variable
7169 + * @delta: integer value to subtract
7170 + * @ptr: pointer to type atomic64_unchecked_t
7171 + *
7172 + * Atomically subtracts @delta from @ptr.
7173 + */
7174 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7175 +
7176 +/**
7177 * atomic64_sub_and_test - subtract value from variable and test result
7178 * @delta: integer value to subtract
7179 * @ptr: pointer to type atomic64_t
7180 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7181 extern void atomic64_inc(atomic64_t *ptr);
7182
7183 /**
7184 + * atomic64_inc_unchecked - increment atomic64 variable
7185 + * @ptr: pointer to type atomic64_unchecked_t
7186 + *
7187 + * Atomically increments @ptr by 1.
7188 + */
7189 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7190 +
7191 +/**
7192 * atomic64_dec - decrement atomic64 variable
7193 * @ptr: pointer to type atomic64_t
7194 *
7195 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7196 extern void atomic64_dec(atomic64_t *ptr);
7197
7198 /**
7199 + * atomic64_dec_unchecked - decrement atomic64 variable
7200 + * @ptr: pointer to type atomic64_unchecked_t
7201 + *
7202 + * Atomically decrements @ptr by 1.
7203 + */
7204 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7205 +
7206 +/**
7207 * atomic64_dec_and_test - decrement and test
7208 * @ptr: pointer to type atomic64_t
7209 *
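
Note (commentary, not part of the patch): the atomic_32.h changes above (and the atomic_64.h changes that follow) apply one pattern throughout: each checked atomic operation gains a jno branch over an undo sequence followed by a trap into the overflow handler, so a signed overflow restores the counter and raises the x86 overflow exception, while the new *_unchecked variants keep the original wrap-around semantics for counters that may legitimately wrap. A non-atomic userspace sketch of the same detect-undo-and-report idea, using GCC's checked-arithmetic builtin instead of the patch's inline asm:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* userspace stand-in for the PAX_REFCOUNT behaviour: add, and if the
       addition overflowed, leave the counter untouched and report instead
       of letting a reference count wrap around */
    static int checked_add(int *counter, int delta)
    {
        int result;

        if (__builtin_add_overflow(*counter, delta, &result)) {
            /* the kernel patch undoes the add and traps via "int $4" here */
            fprintf(stderr, "refcount overflow detected, value left at %d\n", *counter);
            return -1;
        }
        *counter = result;
        return 0;
    }

    int main(void)
    {
        int refs = INT_MAX - 1;

        checked_add(&refs, 1);       /* fine: reaches INT_MAX */
        if (checked_add(&refs, 1))   /* would wrap: rejected, refs stays INT_MAX */
            return EXIT_FAILURE;
        return 0;
    }
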
7210 diff -urNp linux-2.6.32.42/arch/x86/include/asm/atomic_64.h linux-2.6.32.42/arch/x86/include/asm/atomic_64.h
7211 --- linux-2.6.32.42/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7212 +++ linux-2.6.32.42/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7213 @@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7214 }
7215
7216 /**
7217 + * atomic_read_unchecked - read atomic variable
7218 + * @v: pointer of type atomic_unchecked_t
7219 + *
7220 + * Atomically reads the value of @v.
7221 + */
7222 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7223 +{
7224 + return v->counter;
7225 +}
7226 +
7227 +/**
7228 * atomic_set - set atomic variable
7229 * @v: pointer of type atomic_t
7230 * @i: required value
7231 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7232 }
7233
7234 /**
7235 + * atomic_set_unchecked - set atomic variable
7236 + * @v: pointer of type atomic_unchecked_t
7237 + * @i: required value
7238 + *
7239 + * Atomically sets the value of @v to @i.
7240 + */
7241 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7242 +{
7243 + v->counter = i;
7244 +}
7245 +
7246 +/**
7247 * atomic_add - add integer to atomic variable
7248 * @i: integer value to add
7249 * @v: pointer of type atomic_t
7250 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7251 */
7252 static inline void atomic_add(int i, atomic_t *v)
7253 {
7254 - asm volatile(LOCK_PREFIX "addl %1,%0"
7255 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7256 +
7257 +#ifdef CONFIG_PAX_REFCOUNT
7258 + "jno 0f\n"
7259 + LOCK_PREFIX "subl %1,%0\n"
7260 + "int $4\n0:\n"
7261 + _ASM_EXTABLE(0b, 0b)
7262 +#endif
7263 +
7264 + : "=m" (v->counter)
7265 + : "ir" (i), "m" (v->counter));
7266 +}
7267 +
7268 +/**
7269 + * atomic_add_unchecked - add integer to atomic variable
7270 + * @i: integer value to add
7271 + * @v: pointer of type atomic_unchecked_t
7272 + *
7273 + * Atomically adds @i to @v.
7274 + */
7275 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7276 +{
7277 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7278 : "=m" (v->counter)
7279 : "ir" (i), "m" (v->counter));
7280 }
7281 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7282 */
7283 static inline void atomic_sub(int i, atomic_t *v)
7284 {
7285 - asm volatile(LOCK_PREFIX "subl %1,%0"
7286 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7287 +
7288 +#ifdef CONFIG_PAX_REFCOUNT
7289 + "jno 0f\n"
7290 + LOCK_PREFIX "addl %1,%0\n"
7291 + "int $4\n0:\n"
7292 + _ASM_EXTABLE(0b, 0b)
7293 +#endif
7294 +
7295 + : "=m" (v->counter)
7296 + : "ir" (i), "m" (v->counter));
7297 +}
7298 +
7299 +/**
7300 + * atomic_sub_unchecked - subtract the atomic variable
7301 + * @i: integer value to subtract
7302 + * @v: pointer of type atomic_unchecked_t
7303 + *
7304 + * Atomically subtracts @i from @v.
7305 + */
7306 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7307 +{
7308 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7309 : "=m" (v->counter)
7310 : "ir" (i), "m" (v->counter));
7311 }
7312 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7313 {
7314 unsigned char c;
7315
7316 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7317 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7318 +
7319 +#ifdef CONFIG_PAX_REFCOUNT
7320 + "jno 0f\n"
7321 + LOCK_PREFIX "addl %2,%0\n"
7322 + "int $4\n0:\n"
7323 + _ASM_EXTABLE(0b, 0b)
7324 +#endif
7325 +
7326 + "sete %1\n"
7327 : "=m" (v->counter), "=qm" (c)
7328 : "ir" (i), "m" (v->counter) : "memory");
7329 return c;
7330 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7331 */
7332 static inline void atomic_inc(atomic_t *v)
7333 {
7334 - asm volatile(LOCK_PREFIX "incl %0"
7335 + asm volatile(LOCK_PREFIX "incl %0\n"
7336 +
7337 +#ifdef CONFIG_PAX_REFCOUNT
7338 + "jno 0f\n"
7339 + LOCK_PREFIX "decl %0\n"
7340 + "int $4\n0:\n"
7341 + _ASM_EXTABLE(0b, 0b)
7342 +#endif
7343 +
7344 + : "=m" (v->counter)
7345 + : "m" (v->counter));
7346 +}
7347 +
7348 +/**
7349 + * atomic_inc_unchecked - increment atomic variable
7350 + * @v: pointer of type atomic_unchecked_t
7351 + *
7352 + * Atomically increments @v by 1.
7353 + */
7354 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7355 +{
7356 + asm volatile(LOCK_PREFIX "incl %0\n"
7357 : "=m" (v->counter)
7358 : "m" (v->counter));
7359 }
7360 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7361 */
7362 static inline void atomic_dec(atomic_t *v)
7363 {
7364 - asm volatile(LOCK_PREFIX "decl %0"
7365 + asm volatile(LOCK_PREFIX "decl %0\n"
7366 +
7367 +#ifdef CONFIG_PAX_REFCOUNT
7368 + "jno 0f\n"
7369 + LOCK_PREFIX "incl %0\n"
7370 + "int $4\n0:\n"
7371 + _ASM_EXTABLE(0b, 0b)
7372 +#endif
7373 +
7374 + : "=m" (v->counter)
7375 + : "m" (v->counter));
7376 +}
7377 +
7378 +/**
7379 + * atomic_dec_unchecked - decrement atomic variable
7380 + * @v: pointer of type atomic_unchecked_t
7381 + *
7382 + * Atomically decrements @v by 1.
7383 + */
7384 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7385 +{
7386 + asm volatile(LOCK_PREFIX "decl %0\n"
7387 : "=m" (v->counter)
7388 : "m" (v->counter));
7389 }
7390 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7391 {
7392 unsigned char c;
7393
7394 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7395 + asm volatile(LOCK_PREFIX "decl %0\n"
7396 +
7397 +#ifdef CONFIG_PAX_REFCOUNT
7398 + "jno 0f\n"
7399 + LOCK_PREFIX "incl %0\n"
7400 + "int $4\n0:\n"
7401 + _ASM_EXTABLE(0b, 0b)
7402 +#endif
7403 +
7404 + "sete %1\n"
7405 : "=m" (v->counter), "=qm" (c)
7406 : "m" (v->counter) : "memory");
7407 return c != 0;
7408 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7409 {
7410 unsigned char c;
7411
7412 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7413 + asm volatile(LOCK_PREFIX "incl %0\n"
7414 +
7415 +#ifdef CONFIG_PAX_REFCOUNT
7416 + "jno 0f\n"
7417 + LOCK_PREFIX "decl %0\n"
7418 + "int $4\n0:\n"
7419 + _ASM_EXTABLE(0b, 0b)
7420 +#endif
7421 +
7422 + "sete %1\n"
7423 + : "=m" (v->counter), "=qm" (c)
7424 + : "m" (v->counter) : "memory");
7425 + return c != 0;
7426 +}
7427 +
7428 +/**
7429 + * atomic_inc_and_test_unchecked - increment and test
7430 + * @v: pointer of type atomic_unchecked_t
7431 + *
7432 + * Atomically increments @v by 1
7433 + * and returns true if the result is zero, or false for all
7434 + * other cases.
7435 + */
7436 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7437 +{
7438 + unsigned char c;
7439 +
7440 + asm volatile(LOCK_PREFIX "incl %0\n"
7441 + "sete %1\n"
7442 : "=m" (v->counter), "=qm" (c)
7443 : "m" (v->counter) : "memory");
7444 return c != 0;
7445 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7446 {
7447 unsigned char c;
7448
7449 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7450 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7451 +
7452 +#ifdef CONFIG_PAX_REFCOUNT
7453 + "jno 0f\n"
7454 + LOCK_PREFIX "subl %2,%0\n"
7455 + "int $4\n0:\n"
7456 + _ASM_EXTABLE(0b, 0b)
7457 +#endif
7458 +
7459 + "sets %1\n"
7460 : "=m" (v->counter), "=qm" (c)
7461 : "ir" (i), "m" (v->counter) : "memory");
7462 return c;
7463 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7464 static inline int atomic_add_return(int i, atomic_t *v)
7465 {
7466 int __i = i;
7467 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
7468 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7469 +
7470 +#ifdef CONFIG_PAX_REFCOUNT
7471 + "jno 0f\n"
7472 + "movl %0, %1\n"
7473 + "int $4\n0:\n"
7474 + _ASM_EXTABLE(0b, 0b)
7475 +#endif
7476 +
7477 + : "+r" (i), "+m" (v->counter)
7478 + : : "memory");
7479 + return i + __i;
7480 +}
7481 +
7482 +/**
7483 + * atomic_add_return_unchecked - add and return
7484 + * @i: integer value to add
7485 + * @v: pointer of type atomic_unchecked_t
7486 + *
7487 + * Atomically adds @i to @v and returns @i + @v
7488 + */
7489 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7490 +{
7491 + int __i = i;
7492 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7493 : "+r" (i), "+m" (v->counter)
7494 : : "memory");
7495 return i + __i;
7496 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7497 }
7498
7499 #define atomic_inc_return(v) (atomic_add_return(1, v))
7500 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7501 +{
7502 + return atomic_add_return_unchecked(1, v);
7503 +}
7504 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7505
7506 /* The 64-bit atomic type */
7507 @@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7508 }
7509
7510 /**
7511 + * atomic64_read_unchecked - read atomic64 variable
7512 + * @v: pointer of type atomic64_unchecked_t
7513 + *
7514 + * Atomically reads the value of @v.
7515 + * Doesn't imply a read memory barrier.
7516 + */
7517 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7518 +{
7519 + return v->counter;
7520 +}
7521 +
7522 +/**
7523 * atomic64_set - set atomic64 variable
7524 * @v: pointer to type atomic64_t
7525 * @i: required value
7526 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7527 }
7528
7529 /**
7530 + * atomic64_set_unchecked - set atomic64 variable
7531 + * @v: pointer to type atomic64_unchecked_t
7532 + * @i: required value
7533 + *
7534 + * Atomically sets the value of @v to @i.
7535 + */
7536 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7537 +{
7538 + v->counter = i;
7539 +}
7540 +
7541 +/**
7542 * atomic64_add - add integer to atomic64 variable
7543 * @i: integer value to add
7544 * @v: pointer to type atomic64_t
7545 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7546 */
7547 static inline void atomic64_add(long i, atomic64_t *v)
7548 {
7549 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7550 +
7551 +#ifdef CONFIG_PAX_REFCOUNT
7552 + "jno 0f\n"
7553 + LOCK_PREFIX "subq %1,%0\n"
7554 + "int $4\n0:\n"
7555 + _ASM_EXTABLE(0b, 0b)
7556 +#endif
7557 +
7558 + : "=m" (v->counter)
7559 + : "er" (i), "m" (v->counter));
7560 +}
7561 +
7562 +/**
7563 + * atomic64_add_unchecked - add integer to atomic64 variable
7564 + * @i: integer value to add
7565 + * @v: pointer to type atomic64_unchecked_t
7566 + *
7567 + * Atomically adds @i to @v.
7568 + */
7569 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7570 +{
7571 asm volatile(LOCK_PREFIX "addq %1,%0"
7572 : "=m" (v->counter)
7573 : "er" (i), "m" (v->counter));
7574 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7575 */
7576 static inline void atomic64_sub(long i, atomic64_t *v)
7577 {
7578 - asm volatile(LOCK_PREFIX "subq %1,%0"
7579 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7580 +
7581 +#ifdef CONFIG_PAX_REFCOUNT
7582 + "jno 0f\n"
7583 + LOCK_PREFIX "addq %1,%0\n"
7584 + "int $4\n0:\n"
7585 + _ASM_EXTABLE(0b, 0b)
7586 +#endif
7587 +
7588 : "=m" (v->counter)
7589 : "er" (i), "m" (v->counter));
7590 }
7591 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7592 {
7593 unsigned char c;
7594
7595 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7596 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7597 +
7598 +#ifdef CONFIG_PAX_REFCOUNT
7599 + "jno 0f\n"
7600 + LOCK_PREFIX "addq %2,%0\n"
7601 + "int $4\n0:\n"
7602 + _ASM_EXTABLE(0b, 0b)
7603 +#endif
7604 +
7605 + "sete %1\n"
7606 : "=m" (v->counter), "=qm" (c)
7607 : "er" (i), "m" (v->counter) : "memory");
7608 return c;
7609 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7610 */
7611 static inline void atomic64_inc(atomic64_t *v)
7612 {
7613 + asm volatile(LOCK_PREFIX "incq %0\n"
7614 +
7615 +#ifdef CONFIG_PAX_REFCOUNT
7616 + "jno 0f\n"
7617 + LOCK_PREFIX "decq %0\n"
7618 + "int $4\n0:\n"
7619 + _ASM_EXTABLE(0b, 0b)
7620 +#endif
7621 +
7622 + : "=m" (v->counter)
7623 + : "m" (v->counter));
7624 +}
7625 +
7626 +/**
7627 + * atomic64_inc_unchecked - increment atomic64 variable
7628 + * @v: pointer to type atomic64_unchecked_t
7629 + *
7630 + * Atomically increments @v by 1.
7631 + */
7632 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7633 +{
7634 asm volatile(LOCK_PREFIX "incq %0"
7635 : "=m" (v->counter)
7636 : "m" (v->counter));
7637 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7638 */
7639 static inline void atomic64_dec(atomic64_t *v)
7640 {
7641 - asm volatile(LOCK_PREFIX "decq %0"
7642 + asm volatile(LOCK_PREFIX "decq %0\n"
7643 +
7644 +#ifdef CONFIG_PAX_REFCOUNT
7645 + "jno 0f\n"
7646 + LOCK_PREFIX "incq %0\n"
7647 + "int $4\n0:\n"
7648 + _ASM_EXTABLE(0b, 0b)
7649 +#endif
7650 +
7651 + : "=m" (v->counter)
7652 + : "m" (v->counter));
7653 +}
7654 +
7655 +/**
7656 + * atomic64_dec_unchecked - decrement atomic64 variable
7657 + * @v: pointer to type atomic64_unchecked_t
7658 + *
7659 + * Atomically decrements @v by 1.
7660 + */
7661 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7662 +{
7663 + asm volatile(LOCK_PREFIX "decq %0\n"
7664 : "=m" (v->counter)
7665 : "m" (v->counter));
7666 }
7667 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
7668 {
7669 unsigned char c;
7670
7671 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7672 + asm volatile(LOCK_PREFIX "decq %0\n"
7673 +
7674 +#ifdef CONFIG_PAX_REFCOUNT
7675 + "jno 0f\n"
7676 + LOCK_PREFIX "incq %0\n"
7677 + "int $4\n0:\n"
7678 + _ASM_EXTABLE(0b, 0b)
7679 +#endif
7680 +
7681 + "sete %1\n"
7682 : "=m" (v->counter), "=qm" (c)
7683 : "m" (v->counter) : "memory");
7684 return c != 0;
7685 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
7686 {
7687 unsigned char c;
7688
7689 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7690 + asm volatile(LOCK_PREFIX "incq %0\n"
7691 +
7692 +#ifdef CONFIG_PAX_REFCOUNT
7693 + "jno 0f\n"
7694 + LOCK_PREFIX "decq %0\n"
7695 + "int $4\n0:\n"
7696 + _ASM_EXTABLE(0b, 0b)
7697 +#endif
7698 +
7699 + "sete %1\n"
7700 : "=m" (v->counter), "=qm" (c)
7701 : "m" (v->counter) : "memory");
7702 return c != 0;
7703 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
7704 {
7705 unsigned char c;
7706
7707 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7708 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7709 +
7710 +#ifdef CONFIG_PAX_REFCOUNT
7711 + "jno 0f\n"
7712 + LOCK_PREFIX "subq %2,%0\n"
7713 + "int $4\n0:\n"
7714 + _ASM_EXTABLE(0b, 0b)
7715 +#endif
7716 +
7717 + "sets %1\n"
7718 : "=m" (v->counter), "=qm" (c)
7719 : "er" (i), "m" (v->counter) : "memory");
7720 return c;
7721 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
7722 static inline long atomic64_add_return(long i, atomic64_t *v)
7723 {
7724 long __i = i;
7725 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7726 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7727 +
7728 +#ifdef CONFIG_PAX_REFCOUNT
7729 + "jno 0f\n"
7730 + "movq %0, %1\n"
7731 + "int $4\n0:\n"
7732 + _ASM_EXTABLE(0b, 0b)
7733 +#endif
7734 +
7735 + : "+r" (i), "+m" (v->counter)
7736 + : : "memory");
7737 + return i + __i;
7738 +}
7739 +
7740 +/**
7741 + * atomic64_add_return_unchecked - add and return
7742 + * @i: integer value to add
7743 + * @v: pointer to type atomic64_unchecked_t
7744 + *
7745 + * Atomically adds @i to @v and returns @i + @v
7746 + */
7747 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7748 +{
7749 + long __i = i;
7750 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
7751 : "+r" (i), "+m" (v->counter)
7752 : : "memory");
7753 return i + __i;
7754 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
7755 }
7756
7757 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7758 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7759 +{
7760 + return atomic64_add_return_unchecked(1, v);
7761 +}
7762 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7763
7764 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7765 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
7766 return cmpxchg(&v->counter, old, new);
7767 }
7768
7769 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7770 +{
7771 + return cmpxchg(&v->counter, old, new);
7772 +}
7773 +
7774 static inline long atomic64_xchg(atomic64_t *v, long new)
7775 {
7776 return xchg(&v->counter, new);
7777 }
7778
7779 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7780 +{
7781 + return xchg(&v->counter, new);
7782 +}
7783 +
7784 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
7785 {
7786 return cmpxchg(&v->counter, old, new);
7787 }
7788
7789 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7790 +{
7791 + return cmpxchg(&v->counter, old, new);
7792 +}
7793 +
7794 static inline long atomic_xchg(atomic_t *v, int new)
7795 {
7796 return xchg(&v->counter, new);
7797 }
7798
7799 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7800 +{
7801 + return xchg(&v->counter, new);
7802 +}
7803 +
7804 /**
7805 * atomic_add_unless - add unless the number is a given value
7806 * @v: pointer of type atomic_t
7807 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
7808 */
7809 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7810 {
7811 - int c, old;
7812 + int c, old, new;
7813 c = atomic_read(v);
7814 for (;;) {
7815 - if (unlikely(c == (u)))
7816 + if (unlikely(c == u))
7817 break;
7818 - old = atomic_cmpxchg((v), c, c + (a));
7819 +
7820 + asm volatile("addl %2,%0\n"
7821 +
7822 +#ifdef CONFIG_PAX_REFCOUNT
7823 + "jno 0f\n"
7824 + "subl %2,%0\n"
7825 + "int $4\n0:\n"
7826 + _ASM_EXTABLE(0b, 0b)
7827 +#endif
7828 +
7829 + : "=r" (new)
7830 + : "0" (c), "ir" (a));
7831 +
7832 + old = atomic_cmpxchg(v, c, new);
7833 if (likely(old == c))
7834 break;
7835 c = old;
7836 }
7837 - return c != (u);
7838 + return c != u;
7839 }
7840
7841 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7842 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
7843 */
7844 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
7845 {
7846 - long c, old;
7847 + long c, old, new;
7848 c = atomic64_read(v);
7849 for (;;) {
7850 - if (unlikely(c == (u)))
7851 + if (unlikely(c == u))
7852 break;
7853 - old = atomic64_cmpxchg((v), c, c + (a));
7854 +
7855 + asm volatile("addq %2,%0\n"
7856 +
7857 +#ifdef CONFIG_PAX_REFCOUNT
7858 + "jno 0f\n"
7859 + "subq %2,%0\n"
7860 + "int $4\n0:\n"
7861 + _ASM_EXTABLE(0b, 0b)
7862 +#endif
7863 +
7864 + : "=r" (new)
7865 + : "0" (c), "er" (a));
7866 +
7867 + old = atomic64_cmpxchg(v, c, new);
7868 if (likely(old == c))
7869 break;
7870 c = old;
7871 }
7872 - return c != (u);
7873 + return c != u;
7874 }
7875
7876 /**
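The atomic64 hunks above all follow the same CONFIG_PAX_REFCOUNT pattern: perform the locked operation, take the `jno` branch past the recovery path when no signed overflow occurred, otherwise undo the operation, raise the overflow exception with `int $4`, and resume via the `_ASM_EXTABLE` entry. A minimal user-space sketch of that check (assumptions: x86-64, GCC/Clang inline asm; the trap is replaced by a returned flag, so this is not the kernel implementation):

#include <stdio.h>
#include <limits.h>

typedef struct { volatile long counter; } atomic64_t;

static inline unsigned char atomic64_inc_checked(atomic64_t *v)
{
	unsigned char overflowed = 0;

	asm volatile("lock incq %0\n\t"
		     "jno 1f\n\t"		/* no signed overflow: skip recovery */
		     "lock decq %0\n\t"		/* overflow: undo the increment */
		     "movb $1, %1\n"
		     "1:"
		     : "+m" (v->counter), "+q" (overflowed)
		     : : "memory", "cc");
	return overflowed;
}

int main(void)
{
	atomic64_t v = { LONG_MAX };		/* the next increment overflows */

	if (atomic64_inc_checked(&v))
		printf("overflow caught, counter restored to %ld\n", v.counter);
	return 0;
}

The checked helpers coexist with the *_unchecked variants added above, which keep the original, uninstrumented behaviour for counters where wrapping is intentional.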
7877 diff -urNp linux-2.6.32.42/arch/x86/include/asm/bitops.h linux-2.6.32.42/arch/x86/include/asm/bitops.h
7878 --- linux-2.6.32.42/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
7879 +++ linux-2.6.32.42/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
7880 @@ -38,7 +38,7 @@
7881 * a mask operation on a byte.
7882 */
7883 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
7884 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
7885 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
7886 #define CONST_MASK(nr) (1 << ((nr) & 7))
7887
7888 /**
7889 diff -urNp linux-2.6.32.42/arch/x86/include/asm/boot.h linux-2.6.32.42/arch/x86/include/asm/boot.h
7890 --- linux-2.6.32.42/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
7891 +++ linux-2.6.32.42/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
7892 @@ -11,10 +11,15 @@
7893 #include <asm/pgtable_types.h>
7894
7895 /* Physical address where kernel should be loaded. */
7896 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7897 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7898 + (CONFIG_PHYSICAL_ALIGN - 1)) \
7899 & ~(CONFIG_PHYSICAL_ALIGN - 1))
7900
7901 +#ifndef __ASSEMBLY__
7902 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
7903 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
7904 +#endif
7905 +
7906 /* Minimum kernel alignment, as a power of two */
7907 #ifdef CONFIG_X86_64
7908 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
7909 diff -urNp linux-2.6.32.42/arch/x86/include/asm/cacheflush.h linux-2.6.32.42/arch/x86/include/asm/cacheflush.h
7910 --- linux-2.6.32.42/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
7911 +++ linux-2.6.32.42/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
7912 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
7913 static inline unsigned long get_page_memtype(struct page *pg)
7914 {
7915 if (!PageUncached(pg) && !PageWC(pg))
7916 - return -1;
7917 + return ~0UL;
7918 else if (!PageUncached(pg) && PageWC(pg))
7919 return _PAGE_CACHE_WC;
7920 else if (PageUncached(pg) && !PageWC(pg))
7921 @@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
7922 SetPageWC(pg);
7923 break;
7924 default:
7925 - case -1:
7926 + case ~0UL:
7927 ClearPageUncached(pg);
7928 ClearPageWC(pg);
7929 break;
7930 diff -urNp linux-2.6.32.42/arch/x86/include/asm/cache.h linux-2.6.32.42/arch/x86/include/asm/cache.h
7931 --- linux-2.6.32.42/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
7932 +++ linux-2.6.32.42/arch/x86/include/asm/cache.h 2011-05-04 17:56:20.000000000 -0400
7933 @@ -5,9 +5,10 @@
7934
7935 /* L1 cache line size */
7936 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7937 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7938 +#define L1_CACHE_BYTES (_AC(1,U) << L1_CACHE_SHIFT)
7939
7940 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
7941 +#define __read_only __attribute__((__section__(".data.read_only")))
7942
7943 #ifdef CONFIG_X86_VSMP
7944 /* vSMP Internode cacheline shift */
7945 diff -urNp linux-2.6.32.42/arch/x86/include/asm/checksum_32.h linux-2.6.32.42/arch/x86/include/asm/checksum_32.h
7946 --- linux-2.6.32.42/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
7947 +++ linux-2.6.32.42/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
7948 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7949 int len, __wsum sum,
7950 int *src_err_ptr, int *dst_err_ptr);
7951
7952 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7953 + int len, __wsum sum,
7954 + int *src_err_ptr, int *dst_err_ptr);
7955 +
7956 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7957 + int len, __wsum sum,
7958 + int *src_err_ptr, int *dst_err_ptr);
7959 +
7960 /*
7961 * Note: when you get a NULL pointer exception here this means someone
7962 * passed in an incorrect kernel address to one of these functions.
7963 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7964 int *err_ptr)
7965 {
7966 might_sleep();
7967 - return csum_partial_copy_generic((__force void *)src, dst,
7968 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
7969 len, sum, err_ptr, NULL);
7970 }
7971
7972 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7973 {
7974 might_sleep();
7975 if (access_ok(VERIFY_WRITE, dst, len))
7976 - return csum_partial_copy_generic(src, (__force void *)dst,
7977 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7978 len, sum, NULL, err_ptr);
7979
7980 if (len)
7981 diff -urNp linux-2.6.32.42/arch/x86/include/asm/desc_defs.h linux-2.6.32.42/arch/x86/include/asm/desc_defs.h
7982 --- linux-2.6.32.42/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
7983 +++ linux-2.6.32.42/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
7984 @@ -31,6 +31,12 @@ struct desc_struct {
7985 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7986 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7987 };
7988 + struct {
7989 + u16 offset_low;
7990 + u16 seg;
7991 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7992 + unsigned offset_high: 16;
7993 + } gate;
7994 };
7995 } __attribute__((packed));
7996
7997 diff -urNp linux-2.6.32.42/arch/x86/include/asm/desc.h linux-2.6.32.42/arch/x86/include/asm/desc.h
7998 --- linux-2.6.32.42/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
7999 +++ linux-2.6.32.42/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8000 @@ -4,6 +4,7 @@
8001 #include <asm/desc_defs.h>
8002 #include <asm/ldt.h>
8003 #include <asm/mmu.h>
8004 +#include <asm/pgtable.h>
8005 #include <linux/smp.h>
8006
8007 static inline void fill_ldt(struct desc_struct *desc,
8008 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8009 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8010 desc->type = (info->read_exec_only ^ 1) << 1;
8011 desc->type |= info->contents << 2;
8012 + desc->type |= info->seg_not_present ^ 1;
8013 desc->s = 1;
8014 desc->dpl = 0x3;
8015 desc->p = info->seg_not_present ^ 1;
8016 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8017 }
8018
8019 extern struct desc_ptr idt_descr;
8020 -extern gate_desc idt_table[];
8021 -
8022 -struct gdt_page {
8023 - struct desc_struct gdt[GDT_ENTRIES];
8024 -} __attribute__((aligned(PAGE_SIZE)));
8025 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8026 +extern gate_desc idt_table[256];
8027
8028 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8029 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8030 {
8031 - return per_cpu(gdt_page, cpu).gdt;
8032 + return cpu_gdt_table[cpu];
8033 }
8034
8035 #ifdef CONFIG_X86_64
8036 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8037 unsigned long base, unsigned dpl, unsigned flags,
8038 unsigned short seg)
8039 {
8040 - gate->a = (seg << 16) | (base & 0xffff);
8041 - gate->b = (base & 0xffff0000) |
8042 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8043 + gate->gate.offset_low = base;
8044 + gate->gate.seg = seg;
8045 + gate->gate.reserved = 0;
8046 + gate->gate.type = type;
8047 + gate->gate.s = 0;
8048 + gate->gate.dpl = dpl;
8049 + gate->gate.p = 1;
8050 + gate->gate.offset_high = base >> 16;
8051 }
8052
8053 #endif
8054 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8055 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8056 const gate_desc *gate)
8057 {
8058 + pax_open_kernel();
8059 memcpy(&idt[entry], gate, sizeof(*gate));
8060 + pax_close_kernel();
8061 }
8062
8063 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8064 const void *desc)
8065 {
8066 + pax_open_kernel();
8067 memcpy(&ldt[entry], desc, 8);
8068 + pax_close_kernel();
8069 }
8070
8071 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8072 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8073 size = sizeof(struct desc_struct);
8074 break;
8075 }
8076 +
8077 + pax_open_kernel();
8078 memcpy(&gdt[entry], desc, size);
8079 + pax_close_kernel();
8080 }
8081
8082 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8083 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8084
8085 static inline void native_load_tr_desc(void)
8086 {
8087 + pax_open_kernel();
8088 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8089 + pax_close_kernel();
8090 }
8091
8092 static inline void native_load_gdt(const struct desc_ptr *dtr)
8093 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8094 unsigned int i;
8095 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8096
8097 + pax_open_kernel();
8098 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8099 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8100 + pax_close_kernel();
8101 }
8102
8103 #define _LDT_empty(info) \
8104 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8105 desc->limit = (limit >> 16) & 0xf;
8106 }
8107
8108 -static inline void _set_gate(int gate, unsigned type, void *addr,
8109 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8110 unsigned dpl, unsigned ist, unsigned seg)
8111 {
8112 gate_desc s;
8113 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8114 * Pentium F0 0F bugfix can have resulted in the mapped
8115 * IDT being write-protected.
8116 */
8117 -static inline void set_intr_gate(unsigned int n, void *addr)
8118 +static inline void set_intr_gate(unsigned int n, const void *addr)
8119 {
8120 BUG_ON((unsigned)n > 0xFF);
8121 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8122 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8123 /*
8124 * This routine sets up an interrupt gate at directory privilege level 3.
8125 */
8126 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8127 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8128 {
8129 BUG_ON((unsigned)n > 0xFF);
8130 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8131 }
8132
8133 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8134 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8135 {
8136 BUG_ON((unsigned)n > 0xFF);
8137 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8138 }
8139
8140 -static inline void set_trap_gate(unsigned int n, void *addr)
8141 +static inline void set_trap_gate(unsigned int n, const void *addr)
8142 {
8143 BUG_ON((unsigned)n > 0xFF);
8144 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8145 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8146 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8147 {
8148 BUG_ON((unsigned)n > 0xFF);
8149 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8150 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8151 }
8152
8153 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8154 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8155 {
8156 BUG_ON((unsigned)n > 0xFF);
8157 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8158 }
8159
8160 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8161 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8162 {
8163 BUG_ON((unsigned)n > 0xFF);
8164 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8165 }
8166
8167 +#ifdef CONFIG_X86_32
8168 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8169 +{
8170 + struct desc_struct d;
8171 +
8172 + if (likely(limit))
8173 + limit = (limit - 1UL) >> PAGE_SHIFT;
8174 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8175 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8176 +}
8177 +#endif
8178 +
8179 #endif /* _ASM_X86_DESC_H */
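The desc.h changes above wrap every IDT/GDT/LDT write in pax_open_kernel()/pax_close_kernel(), whose native implementation appears later in this patch (pgtable.h) as a CR0.WP toggle. A rough user-space analogue of the pattern, with POSIX mprotect() standing in for the write-protect toggle (an assumption for illustration only, not the kernel mechanism):

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <sys/mman.h>

static uint64_t *table;	/* stands in for a descriptor table kept read-only */

static void open_table(void)  { mprotect(table, 4096, PROT_READ | PROT_WRITE); }	/* like pax_open_kernel() */
static void close_table(void) { mprotect(table, 4096, PROT_READ); }			/* like pax_close_kernel() */

static void write_entry(int idx, uint64_t val)
{
	open_table();
	table[idx] = val;
	close_table();
}

int main(void)
{
	table = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (table == MAP_FAILED)
		return 1;
	memset(table, 0, 4096);
	mprotect(table, 4096, PROT_READ);	/* read-only in normal operation */

	write_entry(3, 0xdeadbeefULL);
	printf("entry 3 = %#llx\n", (unsigned long long)table[3]);
	return 0;
}

The pairing lets the descriptor tables stay read-only by default and become writable only for the duration of a single update.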
8180 diff -urNp linux-2.6.32.42/arch/x86/include/asm/device.h linux-2.6.32.42/arch/x86/include/asm/device.h
8181 --- linux-2.6.32.42/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8182 +++ linux-2.6.32.42/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8183 @@ -6,7 +6,7 @@ struct dev_archdata {
8184 void *acpi_handle;
8185 #endif
8186 #ifdef CONFIG_X86_64
8187 -struct dma_map_ops *dma_ops;
8188 + const struct dma_map_ops *dma_ops;
8189 #endif
8190 #ifdef CONFIG_DMAR
8191 void *iommu; /* hook for IOMMU specific extension */
8192 diff -urNp linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h
8193 --- linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8194 +++ linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8195 @@ -25,9 +25,9 @@ extern int iommu_merge;
8196 extern struct device x86_dma_fallback_dev;
8197 extern int panic_on_overflow;
8198
8199 -extern struct dma_map_ops *dma_ops;
8200 +extern const struct dma_map_ops *dma_ops;
8201
8202 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8203 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8204 {
8205 #ifdef CONFIG_X86_32
8206 return dma_ops;
8207 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8208 /* Make sure we keep the same behaviour */
8209 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8210 {
8211 - struct dma_map_ops *ops = get_dma_ops(dev);
8212 + const struct dma_map_ops *ops = get_dma_ops(dev);
8213 if (ops->mapping_error)
8214 return ops->mapping_error(dev, dma_addr);
8215
8216 @@ -122,7 +122,7 @@ static inline void *
8217 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8218 gfp_t gfp)
8219 {
8220 - struct dma_map_ops *ops = get_dma_ops(dev);
8221 + const struct dma_map_ops *ops = get_dma_ops(dev);
8222 void *memory;
8223
8224 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8225 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8226 static inline void dma_free_coherent(struct device *dev, size_t size,
8227 void *vaddr, dma_addr_t bus)
8228 {
8229 - struct dma_map_ops *ops = get_dma_ops(dev);
8230 + const struct dma_map_ops *ops = get_dma_ops(dev);
8231
8232 WARN_ON(irqs_disabled()); /* for portability */
8233
8234 diff -urNp linux-2.6.32.42/arch/x86/include/asm/e820.h linux-2.6.32.42/arch/x86/include/asm/e820.h
8235 --- linux-2.6.32.42/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8236 +++ linux-2.6.32.42/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8237 @@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8238 #define ISA_END_ADDRESS 0x100000
8239 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8240
8241 -#define BIOS_BEGIN 0x000a0000
8242 +#define BIOS_BEGIN 0x000c0000
8243 #define BIOS_END 0x00100000
8244
8245 #ifdef __KERNEL__
8246 diff -urNp linux-2.6.32.42/arch/x86/include/asm/elf.h linux-2.6.32.42/arch/x86/include/asm/elf.h
8247 --- linux-2.6.32.42/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8248 +++ linux-2.6.32.42/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8249 @@ -257,7 +257,25 @@ extern int force_personality32;
8250 the loader. We need to make sure that it is out of the way of the program
8251 that it will "exec", and that there is sufficient room for the brk. */
8252
8253 +#ifdef CONFIG_PAX_SEGMEXEC
8254 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8255 +#else
8256 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8257 +#endif
8258 +
8259 +#ifdef CONFIG_PAX_ASLR
8260 +#ifdef CONFIG_X86_32
8261 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8262 +
8263 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8264 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8265 +#else
8266 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8267 +
8268 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8269 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8270 +#endif
8271 +#endif
8272
8273 /* This yields a mask that user programs can use to figure out what
8274 instruction set this CPU supports. This could be done in user space,
8275 @@ -311,8 +329,7 @@ do { \
8276 #define ARCH_DLINFO \
8277 do { \
8278 if (vdso_enabled) \
8279 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8280 - (unsigned long)current->mm->context.vdso); \
8281 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8282 } while (0)
8283
8284 #define AT_SYSINFO 32
8285 @@ -323,7 +340,7 @@ do { \
8286
8287 #endif /* !CONFIG_X86_32 */
8288
8289 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8290 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8291
8292 #define VDSO_ENTRY \
8293 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8294 @@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8295 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8296 #define compat_arch_setup_additional_pages syscall32_setup_pages
8297
8298 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8299 -#define arch_randomize_brk arch_randomize_brk
8300 -
8301 #endif /* _ASM_X86_ELF_H */
8302 diff -urNp linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h
8303 --- linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8304 +++ linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8305 @@ -15,6 +15,6 @@ enum reboot_type {
8306
8307 extern enum reboot_type reboot_type;
8308
8309 -extern void machine_emergency_restart(void);
8310 +extern void machine_emergency_restart(void) __noreturn;
8311
8312 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8313 diff -urNp linux-2.6.32.42/arch/x86/include/asm/futex.h linux-2.6.32.42/arch/x86/include/asm/futex.h
8314 --- linux-2.6.32.42/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8315 +++ linux-2.6.32.42/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8316 @@ -12,16 +12,18 @@
8317 #include <asm/system.h>
8318
8319 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8320 + typecheck(u32 *, uaddr); \
8321 asm volatile("1:\t" insn "\n" \
8322 "2:\t.section .fixup,\"ax\"\n" \
8323 "3:\tmov\t%3, %1\n" \
8324 "\tjmp\t2b\n" \
8325 "\t.previous\n" \
8326 _ASM_EXTABLE(1b, 3b) \
8327 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8328 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8329 : "i" (-EFAULT), "0" (oparg), "1" (0))
8330
8331 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8332 + typecheck(u32 *, uaddr); \
8333 asm volatile("1:\tmovl %2, %0\n" \
8334 "\tmovl\t%0, %3\n" \
8335 "\t" insn "\n" \
8336 @@ -34,10 +36,10 @@
8337 _ASM_EXTABLE(1b, 4b) \
8338 _ASM_EXTABLE(2b, 4b) \
8339 : "=&a" (oldval), "=&r" (ret), \
8340 - "+m" (*uaddr), "=&r" (tem) \
8341 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8342 : "r" (oparg), "i" (-EFAULT), "1" (0))
8343
8344 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8345 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8346 {
8347 int op = (encoded_op >> 28) & 7;
8348 int cmp = (encoded_op >> 24) & 15;
8349 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8350
8351 switch (op) {
8352 case FUTEX_OP_SET:
8353 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8354 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8355 break;
8356 case FUTEX_OP_ADD:
8357 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8358 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8359 uaddr, oparg);
8360 break;
8361 case FUTEX_OP_OR:
8362 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8363 return ret;
8364 }
8365
8366 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8367 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8368 int newval)
8369 {
8370
8371 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8372 return -ENOSYS;
8373 #endif
8374
8375 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8376 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8377 return -EFAULT;
8378
8379 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8380 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8381 "2:\t.section .fixup, \"ax\"\n"
8382 "3:\tmov %2, %0\n"
8383 "\tjmp 2b\n"
8384 "\t.previous\n"
8385 _ASM_EXTABLE(1b, 3b)
8386 - : "=a" (oldval), "+m" (*uaddr)
8387 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8388 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8389 : "memory"
8390 );
8391 diff -urNp linux-2.6.32.42/arch/x86/include/asm/hw_irq.h linux-2.6.32.42/arch/x86/include/asm/hw_irq.h
8392 --- linux-2.6.32.42/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8393 +++ linux-2.6.32.42/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8394 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8395 extern void enable_IO_APIC(void);
8396
8397 /* Statistics */
8398 -extern atomic_t irq_err_count;
8399 -extern atomic_t irq_mis_count;
8400 +extern atomic_unchecked_t irq_err_count;
8401 +extern atomic_unchecked_t irq_mis_count;
8402
8403 /* EISA */
8404 extern void eisa_set_level_irq(unsigned int irq);
8405 diff -urNp linux-2.6.32.42/arch/x86/include/asm/i387.h linux-2.6.32.42/arch/x86/include/asm/i387.h
8406 --- linux-2.6.32.42/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8407 +++ linux-2.6.32.42/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8408 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8409 {
8410 int err;
8411
8412 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8413 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8414 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8415 +#endif
8416 +
8417 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8418 "2:\n"
8419 ".section .fixup,\"ax\"\n"
8420 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8421 {
8422 int err;
8423
8424 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8425 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8426 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8427 +#endif
8428 +
8429 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8430 "2:\n"
8431 ".section .fixup,\"ax\"\n"
8432 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8433 }
8434
8435 /* We need a safe address that is cheap to find and that is already
8436 - in L1 during context switch. The best choices are unfortunately
8437 - different for UP and SMP */
8438 -#ifdef CONFIG_SMP
8439 -#define safe_address (__per_cpu_offset[0])
8440 -#else
8441 -#define safe_address (kstat_cpu(0).cpustat.user)
8442 -#endif
8443 + in L1 during context switch. */
8444 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8445
8446 /*
8447 * These must be called with preempt disabled
8448 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8449 struct thread_info *me = current_thread_info();
8450 preempt_disable();
8451 if (me->status & TS_USEDFPU)
8452 - __save_init_fpu(me->task);
8453 + __save_init_fpu(current);
8454 else
8455 clts();
8456 }
8457 diff -urNp linux-2.6.32.42/arch/x86/include/asm/io_32.h linux-2.6.32.42/arch/x86/include/asm/io_32.h
8458 --- linux-2.6.32.42/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8459 +++ linux-2.6.32.42/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8460 @@ -3,6 +3,7 @@
8461
8462 #include <linux/string.h>
8463 #include <linux/compiler.h>
8464 +#include <asm/processor.h>
8465
8466 /*
8467 * This file contains the definitions for the x86 IO instructions
8468 @@ -42,6 +43,17 @@
8469
8470 #ifdef __KERNEL__
8471
8472 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8473 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8474 +{
8475 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8476 +}
8477 +
8478 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8479 +{
8480 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8481 +}
8482 +
8483 #include <asm-generic/iomap.h>
8484
8485 #include <linux/vmalloc.h>
8486 diff -urNp linux-2.6.32.42/arch/x86/include/asm/io_64.h linux-2.6.32.42/arch/x86/include/asm/io_64.h
8487 --- linux-2.6.32.42/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8488 +++ linux-2.6.32.42/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8489 @@ -140,6 +140,17 @@ __OUTS(l)
8490
8491 #include <linux/vmalloc.h>
8492
8493 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8494 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8495 +{
8496 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8497 +}
8498 +
8499 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8500 +{
8501 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8502 +}
8503 +
8504 #include <asm-generic/iomap.h>
8505
8506 void __memcpy_fromio(void *, unsigned long, unsigned);
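Both io_32.h and io_64.h above gain ARCH_HAS_VALID_PHYS_ADDR_RANGE helpers that bound physical-address ranges by boot_cpu_data.x86_phys_bits. A small user-space sketch of the same arithmetic, with the CPU's physical address width replaced by an assumed constant of 36 bits purely for illustration:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define X86_PHYS_BITS	36	/* assumption standing in for boot_cpu_data.x86_phys_bits */

static int valid_phys_addr_range(unsigned long addr, size_t count)
{
	return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) <
	       (1ULL << (X86_PHYS_BITS - PAGE_SHIFT)) ? 1 : 0;
}

int main(void)
{
	printf("%d\n", valid_phys_addr_range(0xfffff000UL, PAGE_SIZE));	/* 1: well below the 64 GB limit */
	printf("%d\n", valid_phys_addr_range(0xffffff000UL, PAGE_SIZE));	/* 0: reaches the 64 GB boundary */
	return 0;
}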
8507 diff -urNp linux-2.6.32.42/arch/x86/include/asm/iommu.h linux-2.6.32.42/arch/x86/include/asm/iommu.h
8508 --- linux-2.6.32.42/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8509 +++ linux-2.6.32.42/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8510 @@ -3,7 +3,7 @@
8511
8512 extern void pci_iommu_shutdown(void);
8513 extern void no_iommu_init(void);
8514 -extern struct dma_map_ops nommu_dma_ops;
8515 +extern const struct dma_map_ops nommu_dma_ops;
8516 extern int force_iommu, no_iommu;
8517 extern int iommu_detected;
8518 extern int iommu_pass_through;
8519 diff -urNp linux-2.6.32.42/arch/x86/include/asm/irqflags.h linux-2.6.32.42/arch/x86/include/asm/irqflags.h
8520 --- linux-2.6.32.42/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8521 +++ linux-2.6.32.42/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8522 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8523 sti; \
8524 sysexit
8525
8526 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8527 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8528 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8529 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8530 +
8531 #else
8532 #define INTERRUPT_RETURN iret
8533 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8534 diff -urNp linux-2.6.32.42/arch/x86/include/asm/kprobes.h linux-2.6.32.42/arch/x86/include/asm/kprobes.h
8535 --- linux-2.6.32.42/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8536 +++ linux-2.6.32.42/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8537 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8538 #define BREAKPOINT_INSTRUCTION 0xcc
8539 #define RELATIVEJUMP_INSTRUCTION 0xe9
8540 #define MAX_INSN_SIZE 16
8541 -#define MAX_STACK_SIZE 64
8542 -#define MIN_STACK_SIZE(ADDR) \
8543 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8544 - THREAD_SIZE - (unsigned long)(ADDR))) \
8545 - ? (MAX_STACK_SIZE) \
8546 - : (((unsigned long)current_thread_info()) + \
8547 - THREAD_SIZE - (unsigned long)(ADDR)))
8548 +#define MAX_STACK_SIZE 64UL
8549 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8550
8551 #define flush_insn_slot(p) do { } while (0)
8552
8553 diff -urNp linux-2.6.32.42/arch/x86/include/asm/kvm_host.h linux-2.6.32.42/arch/x86/include/asm/kvm_host.h
8554 --- linux-2.6.32.42/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8555 +++ linux-2.6.32.42/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8556 @@ -536,7 +536,7 @@ struct kvm_x86_ops {
8557 const struct trace_print_flags *exit_reasons_str;
8558 };
8559
8560 -extern struct kvm_x86_ops *kvm_x86_ops;
8561 +extern const struct kvm_x86_ops *kvm_x86_ops;
8562
8563 int kvm_mmu_module_init(void);
8564 void kvm_mmu_module_exit(void);
8565 diff -urNp linux-2.6.32.42/arch/x86/include/asm/local.h linux-2.6.32.42/arch/x86/include/asm/local.h
8566 --- linux-2.6.32.42/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8567 +++ linux-2.6.32.42/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8568 @@ -18,26 +18,58 @@ typedef struct {
8569
8570 static inline void local_inc(local_t *l)
8571 {
8572 - asm volatile(_ASM_INC "%0"
8573 + asm volatile(_ASM_INC "%0\n"
8574 +
8575 +#ifdef CONFIG_PAX_REFCOUNT
8576 + "jno 0f\n"
8577 + _ASM_DEC "%0\n"
8578 + "int $4\n0:\n"
8579 + _ASM_EXTABLE(0b, 0b)
8580 +#endif
8581 +
8582 : "+m" (l->a.counter));
8583 }
8584
8585 static inline void local_dec(local_t *l)
8586 {
8587 - asm volatile(_ASM_DEC "%0"
8588 + asm volatile(_ASM_DEC "%0\n"
8589 +
8590 +#ifdef CONFIG_PAX_REFCOUNT
8591 + "jno 0f\n"
8592 + _ASM_INC "%0\n"
8593 + "int $4\n0:\n"
8594 + _ASM_EXTABLE(0b, 0b)
8595 +#endif
8596 +
8597 : "+m" (l->a.counter));
8598 }
8599
8600 static inline void local_add(long i, local_t *l)
8601 {
8602 - asm volatile(_ASM_ADD "%1,%0"
8603 + asm volatile(_ASM_ADD "%1,%0\n"
8604 +
8605 +#ifdef CONFIG_PAX_REFCOUNT
8606 + "jno 0f\n"
8607 + _ASM_SUB "%1,%0\n"
8608 + "int $4\n0:\n"
8609 + _ASM_EXTABLE(0b, 0b)
8610 +#endif
8611 +
8612 : "+m" (l->a.counter)
8613 : "ir" (i));
8614 }
8615
8616 static inline void local_sub(long i, local_t *l)
8617 {
8618 - asm volatile(_ASM_SUB "%1,%0"
8619 + asm volatile(_ASM_SUB "%1,%0\n"
8620 +
8621 +#ifdef CONFIG_PAX_REFCOUNT
8622 + "jno 0f\n"
8623 + _ASM_ADD "%1,%0\n"
8624 + "int $4\n0:\n"
8625 + _ASM_EXTABLE(0b, 0b)
8626 +#endif
8627 +
8628 : "+m" (l->a.counter)
8629 : "ir" (i));
8630 }
8631 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8632 {
8633 unsigned char c;
8634
8635 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8636 + asm volatile(_ASM_SUB "%2,%0\n"
8637 +
8638 +#ifdef CONFIG_PAX_REFCOUNT
8639 + "jno 0f\n"
8640 + _ASM_ADD "%2,%0\n"
8641 + "int $4\n0:\n"
8642 + _ASM_EXTABLE(0b, 0b)
8643 +#endif
8644 +
8645 + "sete %1\n"
8646 : "+m" (l->a.counter), "=qm" (c)
8647 : "ir" (i) : "memory");
8648 return c;
8649 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8650 {
8651 unsigned char c;
8652
8653 - asm volatile(_ASM_DEC "%0; sete %1"
8654 + asm volatile(_ASM_DEC "%0\n"
8655 +
8656 +#ifdef CONFIG_PAX_REFCOUNT
8657 + "jno 0f\n"
8658 + _ASM_INC "%0\n"
8659 + "int $4\n0:\n"
8660 + _ASM_EXTABLE(0b, 0b)
8661 +#endif
8662 +
8663 + "sete %1\n"
8664 : "+m" (l->a.counter), "=qm" (c)
8665 : : "memory");
8666 return c != 0;
8667 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8668 {
8669 unsigned char c;
8670
8671 - asm volatile(_ASM_INC "%0; sete %1"
8672 + asm volatile(_ASM_INC "%0\n"
8673 +
8674 +#ifdef CONFIG_PAX_REFCOUNT
8675 + "jno 0f\n"
8676 + _ASM_DEC "%0\n"
8677 + "int $4\n0:\n"
8678 + _ASM_EXTABLE(0b, 0b)
8679 +#endif
8680 +
8681 + "sete %1\n"
8682 : "+m" (l->a.counter), "=qm" (c)
8683 : : "memory");
8684 return c != 0;
8685 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8686 {
8687 unsigned char c;
8688
8689 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8690 + asm volatile(_ASM_ADD "%2,%0\n"
8691 +
8692 +#ifdef CONFIG_PAX_REFCOUNT
8693 + "jno 0f\n"
8694 + _ASM_SUB "%2,%0\n"
8695 + "int $4\n0:\n"
8696 + _ASM_EXTABLE(0b, 0b)
8697 +#endif
8698 +
8699 + "sets %1\n"
8700 : "+m" (l->a.counter), "=qm" (c)
8701 : "ir" (i) : "memory");
8702 return c;
8703 @@ -133,7 +201,15 @@ static inline long local_add_return(long
8704 #endif
8705 /* Modern 486+ processor */
8706 __i = i;
8707 - asm volatile(_ASM_XADD "%0, %1;"
8708 + asm volatile(_ASM_XADD "%0, %1\n"
8709 +
8710 +#ifdef CONFIG_PAX_REFCOUNT
8711 + "jno 0f\n"
8712 + _ASM_MOV "%0,%1\n"
8713 + "int $4\n0:\n"
8714 + _ASM_EXTABLE(0b, 0b)
8715 +#endif
8716 +
8717 : "+r" (i), "+m" (l->a.counter)
8718 : : "memory");
8719 return i + __i;
8720 diff -urNp linux-2.6.32.42/arch/x86/include/asm/microcode.h linux-2.6.32.42/arch/x86/include/asm/microcode.h
8721 --- linux-2.6.32.42/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
8722 +++ linux-2.6.32.42/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
8723 @@ -12,13 +12,13 @@ struct device;
8724 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
8725
8726 struct microcode_ops {
8727 - enum ucode_state (*request_microcode_user) (int cpu,
8728 + enum ucode_state (* const request_microcode_user) (int cpu,
8729 const void __user *buf, size_t size);
8730
8731 - enum ucode_state (*request_microcode_fw) (int cpu,
8732 + enum ucode_state (* const request_microcode_fw) (int cpu,
8733 struct device *device);
8734
8735 - void (*microcode_fini_cpu) (int cpu);
8736 + void (* const microcode_fini_cpu) (int cpu);
8737
8738 /*
8739 * The generic 'microcode_core' part guarantees that
8740 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
8741 extern struct ucode_cpu_info ucode_cpu_info[];
8742
8743 #ifdef CONFIG_MICROCODE_INTEL
8744 -extern struct microcode_ops * __init init_intel_microcode(void);
8745 +extern const struct microcode_ops * __init init_intel_microcode(void);
8746 #else
8747 -static inline struct microcode_ops * __init init_intel_microcode(void)
8748 +static inline const struct microcode_ops * __init init_intel_microcode(void)
8749 {
8750 return NULL;
8751 }
8752 #endif /* CONFIG_MICROCODE_INTEL */
8753
8754 #ifdef CONFIG_MICROCODE_AMD
8755 -extern struct microcode_ops * __init init_amd_microcode(void);
8756 +extern const struct microcode_ops * __init init_amd_microcode(void);
8757 #else
8758 -static inline struct microcode_ops * __init init_amd_microcode(void)
8759 +static inline const struct microcode_ops * __init init_amd_microcode(void)
8760 {
8761 return NULL;
8762 }
8763 diff -urNp linux-2.6.32.42/arch/x86/include/asm/mman.h linux-2.6.32.42/arch/x86/include/asm/mman.h
8764 --- linux-2.6.32.42/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
8765 +++ linux-2.6.32.42/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
8766 @@ -5,4 +5,14 @@
8767
8768 #include <asm-generic/mman.h>
8769
8770 +#ifdef __KERNEL__
8771 +#ifndef __ASSEMBLY__
8772 +#ifdef CONFIG_X86_32
8773 +#define arch_mmap_check i386_mmap_check
8774 +int i386_mmap_check(unsigned long addr, unsigned long len,
8775 + unsigned long flags);
8776 +#endif
8777 +#endif
8778 +#endif
8779 +
8780 #endif /* _ASM_X86_MMAN_H */
8781 diff -urNp linux-2.6.32.42/arch/x86/include/asm/mmu_context.h linux-2.6.32.42/arch/x86/include/asm/mmu_context.h
8782 --- linux-2.6.32.42/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
8783 +++ linux-2.6.32.42/arch/x86/include/asm/mmu_context.h 2011-04-17 15:56:46.000000000 -0400
8784 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
8785
8786 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8787 {
8788 +
8789 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8790 + unsigned int i;
8791 + pgd_t *pgd;
8792 +
8793 + pax_open_kernel();
8794 + pgd = get_cpu_pgd(smp_processor_id());
8795 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8796 + if (paravirt_enabled())
8797 + set_pgd(pgd+i, native_make_pgd(0));
8798 + else
8799 + pgd[i] = native_make_pgd(0);
8800 + pax_close_kernel();
8801 +#endif
8802 +
8803 #ifdef CONFIG_SMP
8804 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8805 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8806 @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
8807 struct task_struct *tsk)
8808 {
8809 unsigned cpu = smp_processor_id();
8810 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
8811 + int tlbstate = TLBSTATE_OK;
8812 +#endif
8813
8814 if (likely(prev != next)) {
8815 #ifdef CONFIG_SMP
8816 +#ifdef CONFIG_X86_32
8817 + tlbstate = percpu_read(cpu_tlbstate.state);
8818 +#endif
8819 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8820 percpu_write(cpu_tlbstate.active_mm, next);
8821 #endif
8822 cpumask_set_cpu(cpu, mm_cpumask(next));
8823
8824 /* Re-load page tables */
8825 +#ifdef CONFIG_PAX_PER_CPU_PGD
8826 + pax_open_kernel();
8827 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8828 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8829 + pax_close_kernel();
8830 + load_cr3(get_cpu_pgd(cpu));
8831 +#else
8832 load_cr3(next->pgd);
8833 +#endif
8834
8835 /* stop flush ipis for the previous mm */
8836 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8837 @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
8838 */
8839 if (unlikely(prev->context.ldt != next->context.ldt))
8840 load_LDT_nolock(&next->context);
8841 - }
8842 +
8843 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8844 + if (!nx_enabled) {
8845 + smp_mb__before_clear_bit();
8846 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8847 + smp_mb__after_clear_bit();
8848 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8849 + }
8850 +#endif
8851 +
8852 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8853 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8854 + prev->context.user_cs_limit != next->context.user_cs_limit))
8855 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8856 #ifdef CONFIG_SMP
8857 + else if (unlikely(tlbstate != TLBSTATE_OK))
8858 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8859 +#endif
8860 +#endif
8861 +
8862 + }
8863 else {
8864 +
8865 +#ifdef CONFIG_PAX_PER_CPU_PGD
8866 + pax_open_kernel();
8867 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8868 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8869 + pax_close_kernel();
8870 + load_cr3(get_cpu_pgd(cpu));
8871 +#endif
8872 +
8873 +#ifdef CONFIG_SMP
8874 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8875 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
8876
8877 @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
8878 * tlb flush IPI delivery. We must reload CR3
8879 * to make sure to use no freed page tables.
8880 */
8881 +
8882 +#ifndef CONFIG_PAX_PER_CPU_PGD
8883 load_cr3(next->pgd);
8884 +#endif
8885 +
8886 load_LDT_nolock(&next->context);
8887 +
8888 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
8889 + if (!nx_enabled)
8890 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8891 +#endif
8892 +
8893 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8894 +#ifdef CONFIG_PAX_PAGEEXEC
8895 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
8896 +#endif
8897 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8898 +#endif
8899 +
8900 }
8901 - }
8902 #endif
8903 + }
8904 }
8905
8906 #define activate_mm(prev, next) \
8907 diff -urNp linux-2.6.32.42/arch/x86/include/asm/mmu.h linux-2.6.32.42/arch/x86/include/asm/mmu.h
8908 --- linux-2.6.32.42/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
8909 +++ linux-2.6.32.42/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
8910 @@ -9,10 +9,23 @@
8911 * we put the segment information here.
8912 */
8913 typedef struct {
8914 - void *ldt;
8915 + struct desc_struct *ldt;
8916 int size;
8917 struct mutex lock;
8918 - void *vdso;
8919 + unsigned long vdso;
8920 +
8921 +#ifdef CONFIG_X86_32
8922 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8923 + unsigned long user_cs_base;
8924 + unsigned long user_cs_limit;
8925 +
8926 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8927 + cpumask_t cpu_user_cs_mask;
8928 +#endif
8929 +
8930 +#endif
8931 +#endif
8932 +
8933 } mm_context_t;
8934
8935 #ifdef CONFIG_SMP
8936 diff -urNp linux-2.6.32.42/arch/x86/include/asm/module.h linux-2.6.32.42/arch/x86/include/asm/module.h
8937 --- linux-2.6.32.42/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
8938 +++ linux-2.6.32.42/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
8939 @@ -5,6 +5,7 @@
8940
8941 #ifdef CONFIG_X86_64
8942 /* X86_64 does not define MODULE_PROC_FAMILY */
8943 +#define MODULE_PROC_FAMILY ""
8944 #elif defined CONFIG_M386
8945 #define MODULE_PROC_FAMILY "386 "
8946 #elif defined CONFIG_M486
8947 @@ -59,13 +60,36 @@
8948 #error unknown processor family
8949 #endif
8950
8951 -#ifdef CONFIG_X86_32
8952 -# ifdef CONFIG_4KSTACKS
8953 -# define MODULE_STACKSIZE "4KSTACKS "
8954 -# else
8955 -# define MODULE_STACKSIZE ""
8956 -# endif
8957 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
8958 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8959 +#define MODULE_PAX_UDEREF "UDEREF "
8960 +#else
8961 +#define MODULE_PAX_UDEREF ""
8962 +#endif
8963 +
8964 +#ifdef CONFIG_PAX_KERNEXEC
8965 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
8966 +#else
8967 +#define MODULE_PAX_KERNEXEC ""
8968 +#endif
8969 +
8970 +#ifdef CONFIG_PAX_REFCOUNT
8971 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
8972 +#else
8973 +#define MODULE_PAX_REFCOUNT ""
8974 #endif
8975
8976 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
8977 +#define MODULE_STACKSIZE "4KSTACKS "
8978 +#else
8979 +#define MODULE_STACKSIZE ""
8980 +#endif
8981 +
8982 +#ifdef CONFIG_GRKERNSEC
8983 +#define MODULE_GRSEC "GRSECURITY "
8984 +#else
8985 +#define MODULE_GRSEC ""
8986 +#endif
8987 +
8988 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
8989 +
8990 #endif /* _ASM_X86_MODULE_H */
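The module.h hunk above rebuilds MODULE_ARCH_VERMAGIC from one fragment per enabled hardening feature, so a module whose vermagic lacks the matching fragments is rejected at load time. A user-space sketch of the resulting string under one assumed example configuration (x86_64 with GRKERNSEC, KERNEXEC, UDEREF and REFCOUNT enabled; 4KSTACKS is 32-bit only, so MODULE_STACKSIZE stays empty):

#include <stdio.h>

#define MODULE_PROC_FAMILY	""
#define MODULE_STACKSIZE	""
#define MODULE_GRSEC		"GRSECURITY "
#define MODULE_PAX_KERNEXEC	"KERNEXEC "
#define MODULE_PAX_UDEREF	"UDEREF "
#define MODULE_PAX_REFCOUNT	"REFCOUNT "

#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT

int main(void)
{
	printf("\"%s\"\n", MODULE_ARCH_VERMAGIC);	/* "GRSECURITY KERNEXEC UDEREF REFCOUNT " */
	return 0;
}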
8991 diff -urNp linux-2.6.32.42/arch/x86/include/asm/page_64_types.h linux-2.6.32.42/arch/x86/include/asm/page_64_types.h
8992 --- linux-2.6.32.42/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
8993 +++ linux-2.6.32.42/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
8994 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
8995
8996 /* duplicated to the one in bootmem.h */
8997 extern unsigned long max_pfn;
8998 -extern unsigned long phys_base;
8999 +extern const unsigned long phys_base;
9000
9001 extern unsigned long __phys_addr(unsigned long);
9002 #define __phys_reloc_hide(x) (x)
9003 diff -urNp linux-2.6.32.42/arch/x86/include/asm/paravirt.h linux-2.6.32.42/arch/x86/include/asm/paravirt.h
9004 --- linux-2.6.32.42/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9005 +++ linux-2.6.32.42/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
9006 @@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
9007 pv_mmu_ops.set_fixmap(idx, phys, flags);
9008 }
9009
9010 +#ifdef CONFIG_PAX_KERNEXEC
9011 +static inline unsigned long pax_open_kernel(void)
9012 +{
9013 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9014 +}
9015 +
9016 +static inline unsigned long pax_close_kernel(void)
9017 +{
9018 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9019 +}
9020 +#else
9021 +static inline unsigned long pax_open_kernel(void) { return 0; }
9022 +static inline unsigned long pax_close_kernel(void) { return 0; }
9023 +#endif
9024 +
9025 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9026
9027 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9028 @@ -945,7 +960,7 @@ extern void default_banner(void);
9029
9030 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9031 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9032 -#define PARA_INDIRECT(addr) *%cs:addr
9033 +#define PARA_INDIRECT(addr) *%ss:addr
9034 #endif
9035
9036 #define INTERRUPT_RETURN \
9037 @@ -1022,6 +1037,21 @@ extern void default_banner(void);
9038 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9039 CLBR_NONE, \
9040 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9041 +
9042 +#define GET_CR0_INTO_RDI \
9043 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9044 + mov %rax,%rdi
9045 +
9046 +#define SET_RDI_INTO_CR0 \
9047 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9048 +
9049 +#define GET_CR3_INTO_RDI \
9050 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9051 + mov %rax,%rdi
9052 +
9053 +#define SET_RDI_INTO_CR3 \
9054 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9055 +
9056 #endif /* CONFIG_X86_32 */
9057
9058 #endif /* __ASSEMBLY__ */
9059 diff -urNp linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h
9060 --- linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9061 +++ linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h 2011-04-17 15:56:46.000000000 -0400
9062 @@ -316,6 +316,12 @@ struct pv_mmu_ops {
9063 an mfn. We can tell which is which from the index. */
9064 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9065 phys_addr_t phys, pgprot_t flags);
9066 +
9067 +#ifdef CONFIG_PAX_KERNEXEC
9068 + unsigned long (*pax_open_kernel)(void);
9069 + unsigned long (*pax_close_kernel)(void);
9070 +#endif
9071 +
9072 };
9073
9074 struct raw_spinlock;
9075 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pci_x86.h linux-2.6.32.42/arch/x86/include/asm/pci_x86.h
9076 --- linux-2.6.32.42/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9077 +++ linux-2.6.32.42/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9078 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9079 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9080
9081 struct pci_raw_ops {
9082 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9083 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9084 int reg, int len, u32 *val);
9085 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9086 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9087 int reg, int len, u32 val);
9088 };
9089
9090 -extern struct pci_raw_ops *raw_pci_ops;
9091 -extern struct pci_raw_ops *raw_pci_ext_ops;
9092 +extern const struct pci_raw_ops *raw_pci_ops;
9093 +extern const struct pci_raw_ops *raw_pci_ext_ops;
9094
9095 -extern struct pci_raw_ops pci_direct_conf1;
9096 +extern const struct pci_raw_ops pci_direct_conf1;
9097 extern bool port_cf9_safe;
9098
9099 /* arch_initcall level */
9100 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgalloc.h linux-2.6.32.42/arch/x86/include/asm/pgalloc.h
9101 --- linux-2.6.32.42/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9102 +++ linux-2.6.32.42/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9103 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9104 pmd_t *pmd, pte_t *pte)
9105 {
9106 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9107 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9108 +}
9109 +
9110 +static inline void pmd_populate_user(struct mm_struct *mm,
9111 + pmd_t *pmd, pte_t *pte)
9112 +{
9113 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9114 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9115 }
9116
9117 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h
9118 --- linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9119 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9120 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9121
9122 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9123 {
9124 + pax_open_kernel();
9125 *pmdp = pmd;
9126 + pax_close_kernel();
9127 }
9128
9129 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9130 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h
9131 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9132 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9133 @@ -26,9 +26,6 @@
9134 struct mm_struct;
9135 struct vm_area_struct;
9136
9137 -extern pgd_t swapper_pg_dir[1024];
9138 -extern pgd_t trampoline_pg_dir[1024];
9139 -
9140 static inline void pgtable_cache_init(void) { }
9141 static inline void check_pgt_cache(void) { }
9142 void paging_init(void);
9143 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9144 # include <asm/pgtable-2level.h>
9145 #endif
9146
9147 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9148 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9149 +#ifdef CONFIG_X86_PAE
9150 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9151 +#endif
9152 +
9153 #if defined(CONFIG_HIGHPTE)
9154 #define __KM_PTE \
9155 (in_nmi() ? KM_NMI_PTE : \
9156 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9157 /* Clear a kernel PTE and flush it from the TLB */
9158 #define kpte_clear_flush(ptep, vaddr) \
9159 do { \
9160 + pax_open_kernel(); \
9161 pte_clear(&init_mm, (vaddr), (ptep)); \
9162 + pax_close_kernel(); \
9163 __flush_tlb_one((vaddr)); \
9164 } while (0)
9165
9166 @@ -85,6 +90,9 @@ do { \
9167
9168 #endif /* !__ASSEMBLY__ */
9169
9170 +#define HAVE_ARCH_UNMAPPED_AREA
9171 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9172 +
9173 /*
9174 * kern_addr_valid() is (1) for FLATMEM and (0) for
9175 * SPARSEMEM and DISCONTIGMEM
9176 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h
9177 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9178 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9179 @@ -8,7 +8,7 @@
9180 */
9181 #ifdef CONFIG_X86_PAE
9182 # include <asm/pgtable-3level_types.h>
9183 -# define PMD_SIZE (1UL << PMD_SHIFT)
9184 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9185 # define PMD_MASK (~(PMD_SIZE - 1))
9186 #else
9187 # include <asm/pgtable-2level_types.h>
9188 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9189 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9190 #endif
9191
9192 +#ifdef CONFIG_PAX_KERNEXEC
9193 +#ifndef __ASSEMBLY__
9194 +extern unsigned char MODULES_EXEC_VADDR[];
9195 +extern unsigned char MODULES_EXEC_END[];
9196 +#endif
9197 +#include <asm/boot.h>
9198 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9199 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9200 +#else
9201 +#define ktla_ktva(addr) (addr)
9202 +#define ktva_ktla(addr) (addr)
9203 +#endif
9204 +
9205 #define MODULES_VADDR VMALLOC_START
9206 #define MODULES_END VMALLOC_END
9207 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
9208 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h
9209 --- linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9210 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9211 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9212
9213 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9214 {
9215 + pax_open_kernel();
9216 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9217 + pax_close_kernel();
9218 }
9219
9220 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9221 {
9222 + pax_open_kernel();
9223 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9224 + pax_close_kernel();
9225 }
9226
9227 /*
9228 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h
9229 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9230 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9231 @@ -16,10 +16,13 @@
9232
9233 extern pud_t level3_kernel_pgt[512];
9234 extern pud_t level3_ident_pgt[512];
9235 +extern pud_t level3_vmalloc_pgt[512];
9236 +extern pud_t level3_vmemmap_pgt[512];
9237 +extern pud_t level2_vmemmap_pgt[512];
9238 extern pmd_t level2_kernel_pgt[512];
9239 extern pmd_t level2_fixmap_pgt[512];
9240 -extern pmd_t level2_ident_pgt[512];
9241 -extern pgd_t init_level4_pgt[];
9242 +extern pmd_t level2_ident_pgt[512*2];
9243 +extern pgd_t init_level4_pgt[512];
9244
9245 #define swapper_pg_dir init_level4_pgt
9246
9247 @@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9248
9249 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9250 {
9251 + pax_open_kernel();
9252 *pmdp = pmd;
9253 + pax_close_kernel();
9254 }
9255
9256 static inline void native_pmd_clear(pmd_t *pmd)
9257 @@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9258
9259 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9260 {
9261 + pax_open_kernel();
9262 *pgdp = pgd;
9263 + pax_close_kernel();
9264 }
9265
9266 static inline void native_pgd_clear(pgd_t *pgd)
9267 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h
9268 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9269 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9270 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9271 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9272 #define MODULES_END _AC(0xffffffffff000000, UL)
9273 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9274 +#define MODULES_EXEC_VADDR MODULES_VADDR
9275 +#define MODULES_EXEC_END MODULES_END
9276 +
9277 +#define ktla_ktva(addr) (addr)
9278 +#define ktva_ktla(addr) (addr)
9279
9280 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9281 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable.h linux-2.6.32.42/arch/x86/include/asm/pgtable.h
9282 --- linux-2.6.32.42/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9283 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9284 @@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9285
9286 #define arch_end_context_switch(prev) do {} while(0)
9287
9288 +#define pax_open_kernel() native_pax_open_kernel()
9289 +#define pax_close_kernel() native_pax_close_kernel()
9290 #endif /* CONFIG_PARAVIRT */
9291
9292 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9293 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9294 +
9295 +#ifdef CONFIG_PAX_KERNEXEC
9296 +static inline unsigned long native_pax_open_kernel(void)
9297 +{
9298 + unsigned long cr0;
9299 +
9300 + preempt_disable();
9301 + barrier();
9302 + cr0 = read_cr0() ^ X86_CR0_WP;
9303 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9304 + write_cr0(cr0);
9305 + return cr0 ^ X86_CR0_WP;
9306 +}
9307 +
9308 +static inline unsigned long native_pax_close_kernel(void)
9309 +{
9310 + unsigned long cr0;
9311 +
9312 + cr0 = read_cr0() ^ X86_CR0_WP;
9313 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9314 + write_cr0(cr0);
9315 + barrier();
9316 + preempt_enable_no_resched();
9317 + return cr0 ^ X86_CR0_WP;
9318 +}
9319 +#else
9320 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9321 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9322 +#endif
9323 +
9324 /*
9325 * The following only work if pte_present() is true.
9326 * Undefined behaviour if not..
9327 */
9328 +static inline int pte_user(pte_t pte)
9329 +{
9330 + return pte_val(pte) & _PAGE_USER;
9331 +}
9332 +
9333 static inline int pte_dirty(pte_t pte)
9334 {
9335 return pte_flags(pte) & _PAGE_DIRTY;
9336 @@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9337 return pte_clear_flags(pte, _PAGE_RW);
9338 }
9339
9340 +static inline pte_t pte_mkread(pte_t pte)
9341 +{
9342 + return __pte(pte_val(pte) | _PAGE_USER);
9343 +}
9344 +
9345 static inline pte_t pte_mkexec(pte_t pte)
9346 {
9347 - return pte_clear_flags(pte, _PAGE_NX);
9348 +#ifdef CONFIG_X86_PAE
9349 + if (__supported_pte_mask & _PAGE_NX)
9350 + return pte_clear_flags(pte, _PAGE_NX);
9351 + else
9352 +#endif
9353 + return pte_set_flags(pte, _PAGE_USER);
9354 +}
9355 +
9356 +static inline pte_t pte_exprotect(pte_t pte)
9357 +{
9358 +#ifdef CONFIG_X86_PAE
9359 + if (__supported_pte_mask & _PAGE_NX)
9360 + return pte_set_flags(pte, _PAGE_NX);
9361 + else
9362 +#endif
9363 + return pte_clear_flags(pte, _PAGE_USER);
9364 }
9365
9366 static inline pte_t pte_mkdirty(pte_t pte)
9367 @@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9368 #endif
9369
9370 #ifndef __ASSEMBLY__
9371 +
9372 +#ifdef CONFIG_PAX_PER_CPU_PGD
9373 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9374 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9375 +{
9376 + return cpu_pgd[cpu];
9377 +}
9378 +#endif
9379 +
9380 #include <linux/mm_types.h>
9381
9382 static inline int pte_none(pte_t pte)
9383 @@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9384
9385 static inline int pgd_bad(pgd_t pgd)
9386 {
9387 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9388 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9389 }
9390
9391 static inline int pgd_none(pgd_t pgd)
9392 @@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9393 * pgd_offset() returns a (pgd_t *)
9394 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9395 */
9396 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9397 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9398 +
9399 +#ifdef CONFIG_PAX_PER_CPU_PGD
9400 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9401 +#endif
9402 +
9403 /*
9404 * a shortcut which implies the use of the kernel's pgd, instead
9405 * of a process's
9406 @@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9407 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9408 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9409
9410 +#ifdef CONFIG_X86_32
9411 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9412 +#else
9413 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9414 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9415 +
9416 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9417 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9418 +#else
9419 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9420 +#endif
9421 +
9422 +#endif
9423 +
9424 #ifndef __ASSEMBLY__
9425
9426 extern int direct_gbpages;
9427 @@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9428 * dst and src can be on the same page, but the range must not overlap,
9429 * and must not cross a page boundary.
9430 */
9431 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9432 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9433 {
9434 - memcpy(dst, src, count * sizeof(pgd_t));
9435 + pax_open_kernel();
9436 + while (count--)
9437 + *dst++ = *src++;
9438 + pax_close_kernel();
9439 }
9440
9441 +#ifdef CONFIG_PAX_PER_CPU_PGD
9442 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9443 +#endif
9444 +
9445 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9446 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9447 +#else
9448 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9449 +#endif
9450
9451 #include <asm-generic/pgtable.h>
9452 #endif /* __ASSEMBLY__ */
9453 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h
9454 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9455 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9456 @@ -16,12 +16,11 @@
9457 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9458 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9459 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9460 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9461 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9462 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9463 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9464 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9465 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9466 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9467 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9468 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9469
9470 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9471 @@ -39,7 +38,6 @@
9472 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9473 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9474 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9475 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9476 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9477 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9478 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9479 @@ -55,8 +53,10 @@
9480
9481 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9482 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9483 -#else
9484 +#elif defined(CONFIG_KMEMCHECK)
9485 #define _PAGE_NX (_AT(pteval_t, 0))
9486 +#else
9487 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9488 #endif
9489
9490 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9491 @@ -93,6 +93,9 @@
9492 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9493 _PAGE_ACCESSED)
9494
9495 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9496 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9497 +
9498 #define __PAGE_KERNEL_EXEC \
9499 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9500 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9501 @@ -103,8 +106,8 @@
9502 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9503 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9504 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9505 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9506 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9507 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9508 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9509 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9510 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9511 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9512 @@ -163,8 +166,8 @@
9513 * bits are combined, this will alow user to access the high address mapped
9514 * VDSO in the presence of CONFIG_COMPAT_VDSO
9515 */
9516 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9517 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9518 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9519 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9520 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9521 #endif
9522
9523 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9524 {
9525 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9526 }
9527 +#endif
9528
9529 +#if PAGETABLE_LEVELS == 3
9530 +#include <asm-generic/pgtable-nopud.h>
9531 +#endif
9532 +
9533 +#if PAGETABLE_LEVELS == 2
9534 +#include <asm-generic/pgtable-nopmd.h>
9535 +#endif
9536 +
9537 +#ifndef __ASSEMBLY__
9538 #if PAGETABLE_LEVELS > 3
9539 typedef struct { pudval_t pud; } pud_t;
9540
9541 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9542 return pud.pud;
9543 }
9544 #else
9545 -#include <asm-generic/pgtable-nopud.h>
9546 -
9547 static inline pudval_t native_pud_val(pud_t pud)
9548 {
9549 return native_pgd_val(pud.pgd);
9550 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9551 return pmd.pmd;
9552 }
9553 #else
9554 -#include <asm-generic/pgtable-nopmd.h>
9555 -
9556 static inline pmdval_t native_pmd_val(pmd_t pmd)
9557 {
9558 return native_pgd_val(pmd.pud.pgd);
9559 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9560
9561 extern pteval_t __supported_pte_mask;
9562 extern void set_nx(void);
9563 +
9564 +#ifdef CONFIG_X86_32
9565 +#ifdef CONFIG_X86_PAE
9566 extern int nx_enabled;
9567 +#else
9568 +#define nx_enabled (0)
9569 +#endif
9570 +#else
9571 +#define nx_enabled (1)
9572 +#endif
9573
9574 #define pgprot_writecombine pgprot_writecombine
9575 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9576 diff -urNp linux-2.6.32.42/arch/x86/include/asm/processor.h linux-2.6.32.42/arch/x86/include/asm/processor.h
9577 --- linux-2.6.32.42/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9578 +++ linux-2.6.32.42/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9579 @@ -272,7 +272,7 @@ struct tss_struct {
9580
9581 } ____cacheline_aligned;
9582
9583 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9584 +extern struct tss_struct init_tss[NR_CPUS];
9585
9586 /*
9587 * Save the original ist values for checking stack pointers during debugging
9588 @@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9589 */
9590 #define TASK_SIZE PAGE_OFFSET
9591 #define TASK_SIZE_MAX TASK_SIZE
9592 +
9593 +#ifdef CONFIG_PAX_SEGMEXEC
9594 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9595 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9596 +#else
9597 #define STACK_TOP TASK_SIZE
9598 -#define STACK_TOP_MAX STACK_TOP
9599 +#endif
9600 +
9601 +#define STACK_TOP_MAX TASK_SIZE
9602
9603 #define INIT_THREAD { \
9604 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9605 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9606 .vm86_info = NULL, \
9607 .sysenter_cs = __KERNEL_CS, \
9608 .io_bitmap_ptr = NULL, \
9609 @@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
9610 */
9611 #define INIT_TSS { \
9612 .x86_tss = { \
9613 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9614 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9615 .ss0 = __KERNEL_DS, \
9616 .ss1 = __KERNEL_CS, \
9617 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9618 @@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
9619 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9620
9621 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9622 -#define KSTK_TOP(info) \
9623 -({ \
9624 - unsigned long *__ptr = (unsigned long *)(info); \
9625 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9626 -})
9627 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9628
9629 /*
9630 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9631 @@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
9632 #define task_pt_regs(task) \
9633 ({ \
9634 struct pt_regs *__regs__; \
9635 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9636 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9637 __regs__ - 1; \
9638 })
9639
9640 @@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
9641 /*
9642 * User space process size. 47bits minus one guard page.
9643 */
9644 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9645 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9646
9647 /* This decides where the kernel will search for a free chunk of vm
9648 * space during mmap's.
9649 */
9650 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9651 - 0xc0000000 : 0xFFFFe000)
9652 + 0xc0000000 : 0xFFFFf000)
9653
9654 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9655 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9656 @@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
9657 #define STACK_TOP_MAX TASK_SIZE_MAX
9658
9659 #define INIT_THREAD { \
9660 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9661 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9662 }
9663
9664 #define INIT_TSS { \
9665 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9666 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9667 }
9668
9669 /*
9670 @@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
9671 */
9672 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9673
9674 +#ifdef CONFIG_PAX_SEGMEXEC
9675 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9676 +#endif
9677 +
9678 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9679
9680 /* Get/set a process' ability to use the timestamp counter instruction */
9681 diff -urNp linux-2.6.32.42/arch/x86/include/asm/ptrace.h linux-2.6.32.42/arch/x86/include/asm/ptrace.h
9682 --- linux-2.6.32.42/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
9683 +++ linux-2.6.32.42/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
9684 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_
9685 }
9686
9687 /*
9688 - * user_mode_vm(regs) determines whether a register set came from user mode.
9689 + * user_mode(regs) determines whether a register set came from user mode.
9690 * This is true if V8086 mode was enabled OR if the register set was from
9691 * protected mode with RPL-3 CS value. This tricky test checks that with
9692 * one comparison. Many places in the kernel can bypass this full check
9693 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9694 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9695 + * be used.
9696 */
9697 -static inline int user_mode(struct pt_regs *regs)
9698 +static inline int user_mode_novm(struct pt_regs *regs)
9699 {
9700 #ifdef CONFIG_X86_32
9701 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9702 #else
9703 - return !!(regs->cs & 3);
9704 + return !!(regs->cs & SEGMENT_RPL_MASK);
9705 #endif
9706 }
9707
9708 -static inline int user_mode_vm(struct pt_regs *regs)
9709 +static inline int user_mode(struct pt_regs *regs)
9710 {
9711 #ifdef CONFIG_X86_32
9712 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9713 USER_RPL;
9714 #else
9715 - return user_mode(regs);
9716 + return user_mode_novm(regs);
9717 #endif
9718 }
9719
9720 diff -urNp linux-2.6.32.42/arch/x86/include/asm/reboot.h linux-2.6.32.42/arch/x86/include/asm/reboot.h
9721 --- linux-2.6.32.42/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
9722 +++ linux-2.6.32.42/arch/x86/include/asm/reboot.h 2011-05-22 23:02:03.000000000 -0400
9723 @@ -6,19 +6,19 @@
9724 struct pt_regs;
9725
9726 struct machine_ops {
9727 - void (*restart)(char *cmd);
9728 - void (*halt)(void);
9729 - void (*power_off)(void);
9730 + void (* __noreturn restart)(char *cmd);
9731 + void (* __noreturn halt)(void);
9732 + void (* __noreturn power_off)(void);
9733 void (*shutdown)(void);
9734 void (*crash_shutdown)(struct pt_regs *);
9735 - void (*emergency_restart)(void);
9736 + void (* __noreturn emergency_restart)(void);
9737 };
9738
9739 extern struct machine_ops machine_ops;
9740
9741 void native_machine_crash_shutdown(struct pt_regs *regs);
9742 void native_machine_shutdown(void);
9743 -void machine_real_restart(const unsigned char *code, int length);
9744 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
9745
9746 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
9747 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
9748 diff -urNp linux-2.6.32.42/arch/x86/include/asm/rwsem.h linux-2.6.32.42/arch/x86/include/asm/rwsem.h
9749 --- linux-2.6.32.42/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
9750 +++ linux-2.6.32.42/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
9751 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw
9752 {
9753 asm volatile("# beginning down_read\n\t"
9754 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9755 +
9756 +#ifdef CONFIG_PAX_REFCOUNT
9757 + "jno 0f\n"
9758 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
9759 + "int $4\n0:\n"
9760 + _ASM_EXTABLE(0b, 0b)
9761 +#endif
9762 +
9763 /* adds 0x00000001, returns the old value */
9764 " jns 1f\n"
9765 " call call_rwsem_down_read_failed\n"
9766 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
9767 "1:\n\t"
9768 " mov %1,%2\n\t"
9769 " add %3,%2\n\t"
9770 +
9771 +#ifdef CONFIG_PAX_REFCOUNT
9772 + "jno 0f\n"
9773 + "sub %3,%2\n"
9774 + "int $4\n0:\n"
9775 + _ASM_EXTABLE(0b, 0b)
9776 +#endif
9777 +
9778 " jle 2f\n\t"
9779 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9780 " jnz 1b\n\t"
9781 @@ -160,6 +176,14 @@ static inline void __down_write_nested(s
9782 tmp = RWSEM_ACTIVE_WRITE_BIAS;
9783 asm volatile("# beginning down_write\n\t"
9784 LOCK_PREFIX " xadd %1,(%2)\n\t"
9785 +
9786 +#ifdef CONFIG_PAX_REFCOUNT
9787 + "jno 0f\n"
9788 + "mov %1,(%2)\n"
9789 + "int $4\n0:\n"
9790 + _ASM_EXTABLE(0b, 0b)
9791 +#endif
9792 +
9793 /* subtract 0x0000ffff, returns the old value */
9794 " test %1,%1\n\t"
9795 /* was the count 0 before? */
9796 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
9797 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
9798 asm volatile("# beginning __up_read\n\t"
9799 LOCK_PREFIX " xadd %1,(%2)\n\t"
9800 +
9801 +#ifdef CONFIG_PAX_REFCOUNT
9802 + "jno 0f\n"
9803 + "mov %1,(%2)\n"
9804 + "int $4\n0:\n"
9805 + _ASM_EXTABLE(0b, 0b)
9806 +#endif
9807 +
9808 /* subtracts 1, returns the old value */
9809 " jns 1f\n\t"
9810 " call call_rwsem_wake\n"
9811 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
9812 rwsem_count_t tmp;
9813 asm volatile("# beginning __up_write\n\t"
9814 LOCK_PREFIX " xadd %1,(%2)\n\t"
9815 +
9816 +#ifdef CONFIG_PAX_REFCOUNT
9817 + "jno 0f\n"
9818 + "mov %1,(%2)\n"
9819 + "int $4\n0:\n"
9820 + _ASM_EXTABLE(0b, 0b)
9821 +#endif
9822 +
9823 /* tries to transition
9824 0xffff0001 -> 0x00000000 */
9825 " jz 1f\n"
9826 @@ -234,6 +274,14 @@ static inline void __downgrade_write(str
9827 {
9828 asm volatile("# beginning __downgrade_write\n\t"
9829 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
9830 +
9831 +#ifdef CONFIG_PAX_REFCOUNT
9832 + "jno 0f\n"
9833 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
9834 + "int $4\n0:\n"
9835 + _ASM_EXTABLE(0b, 0b)
9836 +#endif
9837 +
9838 /*
9839 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
9840 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
9841 @@ -253,7 +301,15 @@ static inline void __downgrade_write(str
9842 static inline void rwsem_atomic_add(rwsem_count_t delta,
9843 struct rw_semaphore *sem)
9844 {
9845 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
9846 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
9847 +
9848 +#ifdef CONFIG_PAX_REFCOUNT
9849 + "jno 0f\n"
9850 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
9851 + "int $4\n0:\n"
9852 + _ASM_EXTABLE(0b, 0b)
9853 +#endif
9854 +
9855 : "+m" (sem->count)
9856 : "er" (delta));
9857 }
9858 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
9859 {
9860 rwsem_count_t tmp = delta;
9861
9862 - asm volatile(LOCK_PREFIX "xadd %0,%1"
9863 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
9864 +
9865 +#ifdef CONFIG_PAX_REFCOUNT
9866 + "jno 0f\n"
9867 + "mov %0,%1\n"
9868 + "int $4\n0:\n"
9869 + _ASM_EXTABLE(0b, 0b)
9870 +#endif
9871 +
9872 : "+r" (tmp), "+m" (sem->count)
9873 : : "memory");
9874
9875 diff -urNp linux-2.6.32.42/arch/x86/include/asm/segment.h linux-2.6.32.42/arch/x86/include/asm/segment.h
9876 --- linux-2.6.32.42/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
9877 +++ linux-2.6.32.42/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
9878 @@ -62,8 +62,8 @@
9879 * 26 - ESPFIX small SS
9880 * 27 - per-cpu [ offset to per-cpu data area ]
9881 * 28 - stack_canary-20 [ for stack protector ]
9882 - * 29 - unused
9883 - * 30 - unused
9884 + * 29 - PCI BIOS CS
9885 + * 30 - PCI BIOS DS
9886 * 31 - TSS for double fault handler
9887 */
9888 #define GDT_ENTRY_TLS_MIN 6
9889 @@ -77,6 +77,8 @@
9890
9891 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
9892
9893 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
9894 +
9895 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
9896
9897 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
9898 @@ -88,7 +90,7 @@
9899 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
9900 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
9901
9902 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
9903 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
9904 #ifdef CONFIG_SMP
9905 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
9906 #else
9907 @@ -102,6 +104,12 @@
9908 #define __KERNEL_STACK_CANARY 0
9909 #endif
9910
9911 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
9912 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
9913 +
9914 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
9915 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
9916 +
9917 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
9918
9919 /*
9920 @@ -139,7 +147,7 @@
9921 */
9922
9923 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
9924 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
9925 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
9926
9927
9928 #else
9929 @@ -163,6 +171,8 @@
9930 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
9931 #define __USER32_DS __USER_DS
9932
9933 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
9934 +
9935 #define GDT_ENTRY_TSS 8 /* needs two entries */
9936 #define GDT_ENTRY_LDT 10 /* needs two entries */
9937 #define GDT_ENTRY_TLS_MIN 12
9938 @@ -183,6 +193,7 @@
9939 #endif
9940
9941 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
9942 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
9943 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
9944 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
9945 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
9946 diff -urNp linux-2.6.32.42/arch/x86/include/asm/smp.h linux-2.6.32.42/arch/x86/include/asm/smp.h
9947 --- linux-2.6.32.42/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
9948 +++ linux-2.6.32.42/arch/x86/include/asm/smp.h 2011-04-17 15:56:46.000000000 -0400
9949 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
9950 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
9951 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
9952 DECLARE_PER_CPU(u16, cpu_llc_id);
9953 -DECLARE_PER_CPU(int, cpu_number);
9954 +DECLARE_PER_CPU(unsigned int, cpu_number);
9955
9956 static inline struct cpumask *cpu_sibling_mask(int cpu)
9957 {
9958 @@ -175,14 +175,8 @@ extern unsigned disabled_cpus __cpuinitd
9959 extern int safe_smp_processor_id(void);
9960
9961 #elif defined(CONFIG_X86_64_SMP)
9962 -#define raw_smp_processor_id() (percpu_read(cpu_number))
9963 -
9964 -#define stack_smp_processor_id() \
9965 -({ \
9966 - struct thread_info *ti; \
9967 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
9968 - ti->cpu; \
9969 -})
9970 +#define raw_smp_processor_id() (percpu_read(cpu_number))
9971 +#define stack_smp_processor_id() raw_smp_processor_id()
9972 #define safe_smp_processor_id() smp_processor_id()
9973
9974 #endif
9975 diff -urNp linux-2.6.32.42/arch/x86/include/asm/spinlock.h linux-2.6.32.42/arch/x86/include/asm/spinlock.h
9976 --- linux-2.6.32.42/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
9977 +++ linux-2.6.32.42/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
9978 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
9979 static inline void __raw_read_lock(raw_rwlock_t *rw)
9980 {
9981 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
9982 +
9983 +#ifdef CONFIG_PAX_REFCOUNT
9984 + "jno 0f\n"
9985 + LOCK_PREFIX " addl $1,(%0)\n"
9986 + "int $4\n0:\n"
9987 + _ASM_EXTABLE(0b, 0b)
9988 +#endif
9989 +
9990 "jns 1f\n"
9991 "call __read_lock_failed\n\t"
9992 "1:\n"
9993 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
9994 static inline void __raw_write_lock(raw_rwlock_t *rw)
9995 {
9996 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
9997 +
9998 +#ifdef CONFIG_PAX_REFCOUNT
9999 + "jno 0f\n"
10000 + LOCK_PREFIX " addl %1,(%0)\n"
10001 + "int $4\n0:\n"
10002 + _ASM_EXTABLE(0b, 0b)
10003 +#endif
10004 +
10005 "jz 1f\n"
10006 "call __write_lock_failed\n\t"
10007 "1:\n"
10008 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10009
10010 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10011 {
10012 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10013 + asm volatile(LOCK_PREFIX "incl %0\n"
10014 +
10015 +#ifdef CONFIG_PAX_REFCOUNT
10016 + "jno 0f\n"
10017 + LOCK_PREFIX "decl %0\n"
10018 + "int $4\n0:\n"
10019 + _ASM_EXTABLE(0b, 0b)
10020 +#endif
10021 +
10022 + :"+m" (rw->lock) : : "memory");
10023 }
10024
10025 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10026 {
10027 - asm volatile(LOCK_PREFIX "addl %1, %0"
10028 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
10029 +
10030 +#ifdef CONFIG_PAX_REFCOUNT
10031 + "jno 0f\n"
10032 + LOCK_PREFIX "subl %1, %0\n"
10033 + "int $4\n0:\n"
10034 + _ASM_EXTABLE(0b, 0b)
10035 +#endif
10036 +
10037 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10038 }
10039
10040 diff -urNp linux-2.6.32.42/arch/x86/include/asm/stackprotector.h linux-2.6.32.42/arch/x86/include/asm/stackprotector.h
10041 --- linux-2.6.32.42/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10042 +++ linux-2.6.32.42/arch/x86/include/asm/stackprotector.h 2011-04-17 15:56:46.000000000 -0400
10043 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10044
10045 static inline void load_stack_canary_segment(void)
10046 {
10047 -#ifdef CONFIG_X86_32
10048 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10049 asm volatile ("mov %0, %%gs" : : "r" (0));
10050 #endif
10051 }
10052 diff -urNp linux-2.6.32.42/arch/x86/include/asm/system.h linux-2.6.32.42/arch/x86/include/asm/system.h
10053 --- linux-2.6.32.42/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10054 +++ linux-2.6.32.42/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10055 @@ -132,7 +132,7 @@ do { \
10056 "thread_return:\n\t" \
10057 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10058 __switch_canary \
10059 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10060 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10061 "movq %%rax,%%rdi\n\t" \
10062 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10063 "jnz ret_from_fork\n\t" \
10064 @@ -143,7 +143,7 @@ do { \
10065 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10066 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10067 [_tif_fork] "i" (_TIF_FORK), \
10068 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10069 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
10070 [current_task] "m" (per_cpu_var(current_task)) \
10071 __switch_canary_iparam \
10072 : "memory", "cc" __EXTRA_CLOBBER)
10073 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10074 {
10075 unsigned long __limit;
10076 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10077 - return __limit + 1;
10078 + return __limit;
10079 }
10080
10081 static inline void native_clts(void)
10082 @@ -340,12 +340,12 @@ void enable_hlt(void);
10083
10084 void cpu_idle_wait(void);
10085
10086 -extern unsigned long arch_align_stack(unsigned long sp);
10087 +#define arch_align_stack(x) ((x) & ~0xfUL)
10088 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10089
10090 void default_idle(void);
10091
10092 -void stop_this_cpu(void *dummy);
10093 +void stop_this_cpu(void *dummy) __noreturn;
10094
10095 /*
10096 * Force strict CPU ordering.
10097 diff -urNp linux-2.6.32.42/arch/x86/include/asm/thread_info.h linux-2.6.32.42/arch/x86/include/asm/thread_info.h
10098 --- linux-2.6.32.42/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10099 +++ linux-2.6.32.42/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10100 @@ -10,6 +10,7 @@
10101 #include <linux/compiler.h>
10102 #include <asm/page.h>
10103 #include <asm/types.h>
10104 +#include <asm/percpu.h>
10105
10106 /*
10107 * low level task data that entry.S needs immediate access to
10108 @@ -24,7 +25,6 @@ struct exec_domain;
10109 #include <asm/atomic.h>
10110
10111 struct thread_info {
10112 - struct task_struct *task; /* main task structure */
10113 struct exec_domain *exec_domain; /* execution domain */
10114 __u32 flags; /* low level flags */
10115 __u32 status; /* thread synchronous flags */
10116 @@ -34,18 +34,12 @@ struct thread_info {
10117 mm_segment_t addr_limit;
10118 struct restart_block restart_block;
10119 void __user *sysenter_return;
10120 -#ifdef CONFIG_X86_32
10121 - unsigned long previous_esp; /* ESP of the previous stack in
10122 - case of nested (IRQ) stacks
10123 - */
10124 - __u8 supervisor_stack[0];
10125 -#endif
10126 + unsigned long lowest_stack;
10127 int uaccess_err;
10128 };
10129
10130 -#define INIT_THREAD_INFO(tsk) \
10131 +#define INIT_THREAD_INFO \
10132 { \
10133 - .task = &tsk, \
10134 .exec_domain = &default_exec_domain, \
10135 .flags = 0, \
10136 .cpu = 0, \
10137 @@ -56,7 +50,7 @@ struct thread_info {
10138 }, \
10139 }
10140
10141 -#define init_thread_info (init_thread_union.thread_info)
10142 +#define init_thread_info (init_thread_union.stack)
10143 #define init_stack (init_thread_union.stack)
10144
10145 #else /* !__ASSEMBLY__ */
10146 @@ -163,6 +157,23 @@ struct thread_info {
10147 #define alloc_thread_info(tsk) \
10148 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10149
10150 +#ifdef __ASSEMBLY__
10151 +/* how to get the thread information struct from ASM */
10152 +#define GET_THREAD_INFO(reg) \
10153 + mov PER_CPU_VAR(current_tinfo), reg
10154 +
10155 +/* use this one if reg already contains %esp */
10156 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10157 +#else
10158 +/* how to get the thread information struct from C */
10159 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10160 +
10161 +static __always_inline struct thread_info *current_thread_info(void)
10162 +{
10163 + return percpu_read_stable(current_tinfo);
10164 +}
10165 +#endif
10166 +
10167 #ifdef CONFIG_X86_32
10168
10169 #define STACK_WARN (THREAD_SIZE/8)
10170 @@ -173,35 +184,13 @@ struct thread_info {
10171 */
10172 #ifndef __ASSEMBLY__
10173
10174 -
10175 /* how to get the current stack pointer from C */
10176 register unsigned long current_stack_pointer asm("esp") __used;
10177
10178 -/* how to get the thread information struct from C */
10179 -static inline struct thread_info *current_thread_info(void)
10180 -{
10181 - return (struct thread_info *)
10182 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10183 -}
10184 -
10185 -#else /* !__ASSEMBLY__ */
10186 -
10187 -/* how to get the thread information struct from ASM */
10188 -#define GET_THREAD_INFO(reg) \
10189 - movl $-THREAD_SIZE, reg; \
10190 - andl %esp, reg
10191 -
10192 -/* use this one if reg already contains %esp */
10193 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10194 - andl $-THREAD_SIZE, reg
10195 -
10196 #endif
10197
10198 #else /* X86_32 */
10199
10200 -#include <asm/percpu.h>
10201 -#define KERNEL_STACK_OFFSET (5*8)
10202 -
10203 /*
10204 * macros/functions for gaining access to the thread information structure
10205 * preempt_count needs to be 1 initially, until the scheduler is functional.
10206 @@ -209,21 +198,8 @@ static inline struct thread_info *curren
10207 #ifndef __ASSEMBLY__
10208 DECLARE_PER_CPU(unsigned long, kernel_stack);
10209
10210 -static inline struct thread_info *current_thread_info(void)
10211 -{
10212 - struct thread_info *ti;
10213 - ti = (void *)(percpu_read_stable(kernel_stack) +
10214 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10215 - return ti;
10216 -}
10217 -
10218 -#else /* !__ASSEMBLY__ */
10219 -
10220 -/* how to get the thread information struct from ASM */
10221 -#define GET_THREAD_INFO(reg) \
10222 - movq PER_CPU_VAR(kernel_stack),reg ; \
10223 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10224 -
10225 +/* how to get the current stack pointer from C */
10226 +register unsigned long current_stack_pointer asm("rsp") __used;
10227 #endif
10228
10229 #endif /* !X86_32 */
10230 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10231 extern void free_thread_info(struct thread_info *ti);
10232 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10233 #define arch_task_cache_init arch_task_cache_init
10234 +
10235 +#define __HAVE_THREAD_FUNCTIONS
10236 +#define task_thread_info(task) (&(task)->tinfo)
10237 +#define task_stack_page(task) ((task)->stack)
10238 +#define setup_thread_stack(p, org) do {} while (0)
10239 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10240 +
10241 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10242 +extern struct task_struct *alloc_task_struct(void);
10243 +extern void free_task_struct(struct task_struct *);
10244 +
10245 #endif
10246 #endif /* _ASM_X86_THREAD_INFO_H */
10247 diff -urNp linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h
10248 --- linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10249 +++ linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10250 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10251 static __always_inline unsigned long __must_check
10252 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10253 {
10254 + pax_track_stack();
10255 +
10256 + if ((long)n < 0)
10257 + return n;
10258 +
10259 if (__builtin_constant_p(n)) {
10260 unsigned long ret;
10261
10262 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10263 return ret;
10264 }
10265 }
10266 + if (!__builtin_constant_p(n))
10267 + check_object_size(from, n, true);
10268 return __copy_to_user_ll(to, from, n);
10269 }
10270
10271 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10272 __copy_to_user(void __user *to, const void *from, unsigned long n)
10273 {
10274 might_fault();
10275 +
10276 return __copy_to_user_inatomic(to, from, n);
10277 }
10278
10279 static __always_inline unsigned long
10280 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10281 {
10282 + if ((long)n < 0)
10283 + return n;
10284 +
10285 /* Avoid zeroing the tail if the copy fails..
10286 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10287 * but as the zeroing behaviour is only significant when n is not
10288 @@ -138,6 +149,12 @@ static __always_inline unsigned long
10289 __copy_from_user(void *to, const void __user *from, unsigned long n)
10290 {
10291 might_fault();
10292 +
10293 + pax_track_stack();
10294 +
10295 + if ((long)n < 0)
10296 + return n;
10297 +
10298 if (__builtin_constant_p(n)) {
10299 unsigned long ret;
10300
10301 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10302 return ret;
10303 }
10304 }
10305 + if (!__builtin_constant_p(n))
10306 + check_object_size(to, n, false);
10307 return __copy_from_user_ll(to, from, n);
10308 }
10309
10310 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10311 const void __user *from, unsigned long n)
10312 {
10313 might_fault();
10314 +
10315 + if ((long)n < 0)
10316 + return n;
10317 +
10318 if (__builtin_constant_p(n)) {
10319 unsigned long ret;
10320
10321 @@ -182,14 +205,62 @@ static __always_inline unsigned long
10322 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10323 unsigned long n)
10324 {
10325 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10326 + if ((long)n < 0)
10327 + return n;
10328 +
10329 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10330 +}
10331 +
10332 +/**
10333 + * copy_to_user: - Copy a block of data into user space.
10334 + * @to: Destination address, in user space.
10335 + * @from: Source address, in kernel space.
10336 + * @n: Number of bytes to copy.
10337 + *
10338 + * Context: User context only. This function may sleep.
10339 + *
10340 + * Copy data from kernel space to user space.
10341 + *
10342 + * Returns number of bytes that could not be copied.
10343 + * On success, this will be zero.
10344 + */
10345 +static __always_inline unsigned long __must_check
10346 +copy_to_user(void __user *to, const void *from, unsigned long n)
10347 +{
10348 + if (access_ok(VERIFY_WRITE, to, n))
10349 + n = __copy_to_user(to, from, n);
10350 + return n;
10351 +}
10352 +
10353 +/**
10354 + * copy_from_user: - Copy a block of data from user space.
10355 + * @to: Destination address, in kernel space.
10356 + * @from: Source address, in user space.
10357 + * @n: Number of bytes to copy.
10358 + *
10359 + * Context: User context only. This function may sleep.
10360 + *
10361 + * Copy data from user space to kernel space.
10362 + *
10363 + * Returns number of bytes that could not be copied.
10364 + * On success, this will be zero.
10365 + *
10366 + * If some data could not be copied, this function will pad the copied
10367 + * data to the requested size using zero bytes.
10368 + */
10369 +static __always_inline unsigned long __must_check
10370 +copy_from_user(void *to, const void __user *from, unsigned long n)
10371 +{
10372 + if (access_ok(VERIFY_READ, from, n))
10373 + n = __copy_from_user(to, from, n);
10374 + else if ((long)n > 0) {
10375 + if (!__builtin_constant_p(n))
10376 + check_object_size(to, n, false);
10377 + memset(to, 0, n);
10378 + }
10379 + return n;
10380 }
10381
10382 -unsigned long __must_check copy_to_user(void __user *to,
10383 - const void *from, unsigned long n);
10384 -unsigned long __must_check copy_from_user(void *to,
10385 - const void __user *from,
10386 - unsigned long n);
10387 long __must_check strncpy_from_user(char *dst, const char __user *src,
10388 long count);
10389 long __must_check __strncpy_from_user(char *dst,
10390 diff -urNp linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h
10391 --- linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10392 +++ linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10393 @@ -9,6 +9,9 @@
10394 #include <linux/prefetch.h>
10395 #include <linux/lockdep.h>
10396 #include <asm/page.h>
10397 +#include <asm/pgtable.h>
10398 +
10399 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10400
10401 /*
10402 * Copy To/From Userspace
10403 @@ -19,113 +22,203 @@ __must_check unsigned long
10404 copy_user_generic(void *to, const void *from, unsigned len);
10405
10406 __must_check unsigned long
10407 -copy_to_user(void __user *to, const void *from, unsigned len);
10408 -__must_check unsigned long
10409 -copy_from_user(void *to, const void __user *from, unsigned len);
10410 -__must_check unsigned long
10411 copy_in_user(void __user *to, const void __user *from, unsigned len);
10412
10413 static __always_inline __must_check
10414 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10415 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10416 {
10417 - int ret = 0;
10418 + unsigned ret = 0;
10419
10420 might_fault();
10421 - if (!__builtin_constant_p(size))
10422 - return copy_user_generic(dst, (__force void *)src, size);
10423 +
10424 + if ((int)size < 0)
10425 + return size;
10426 +
10427 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10428 + if (!__access_ok(VERIFY_READ, src, size))
10429 + return size;
10430 +#endif
10431 +
10432 + if (!__builtin_constant_p(size)) {
10433 + check_object_size(dst, size, false);
10434 +
10435 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10436 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10437 + src += PAX_USER_SHADOW_BASE;
10438 +#endif
10439 +
10440 + return copy_user_generic(dst, (__force const void *)src, size);
10441 + }
10442 switch (size) {
10443 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10444 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10445 ret, "b", "b", "=q", 1);
10446 return ret;
10447 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10448 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10449 ret, "w", "w", "=r", 2);
10450 return ret;
10451 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10452 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10453 ret, "l", "k", "=r", 4);
10454 return ret;
10455 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10456 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10457 ret, "q", "", "=r", 8);
10458 return ret;
10459 case 10:
10460 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10461 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10462 ret, "q", "", "=r", 10);
10463 if (unlikely(ret))
10464 return ret;
10465 __get_user_asm(*(u16 *)(8 + (char *)dst),
10466 - (u16 __user *)(8 + (char __user *)src),
10467 + (const u16 __user *)(8 + (const char __user *)src),
10468 ret, "w", "w", "=r", 2);
10469 return ret;
10470 case 16:
10471 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10472 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10473 ret, "q", "", "=r", 16);
10474 if (unlikely(ret))
10475 return ret;
10476 __get_user_asm(*(u64 *)(8 + (char *)dst),
10477 - (u64 __user *)(8 + (char __user *)src),
10478 + (const u64 __user *)(8 + (const char __user *)src),
10479 ret, "q", "", "=r", 8);
10480 return ret;
10481 default:
10482 - return copy_user_generic(dst, (__force void *)src, size);
10483 +
10484 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10485 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10486 + src += PAX_USER_SHADOW_BASE;
10487 +#endif
10488 +
10489 + return copy_user_generic(dst, (__force const void *)src, size);
10490 }
10491 }
10492
10493 static __always_inline __must_check
10494 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
10495 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10496 {
10497 - int ret = 0;
10498 + unsigned ret = 0;
10499
10500 might_fault();
10501 - if (!__builtin_constant_p(size))
10502 +
10503 + pax_track_stack();
10504 +
10505 + if ((int)size < 0)
10506 + return size;
10507 +
10508 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10509 + if (!__access_ok(VERIFY_WRITE, dst, size))
10510 + return size;
10511 +#endif
10512 +
10513 + if (!__builtin_constant_p(size)) {
10514 + check_object_size(src, size, true);
10515 +
10516 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10517 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10518 + dst += PAX_USER_SHADOW_BASE;
10519 +#endif
10520 +
10521 return copy_user_generic((__force void *)dst, src, size);
10522 + }
10523 switch (size) {
10524 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10525 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10526 ret, "b", "b", "iq", 1);
10527 return ret;
10528 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10529 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10530 ret, "w", "w", "ir", 2);
10531 return ret;
10532 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10533 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10534 ret, "l", "k", "ir", 4);
10535 return ret;
10536 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10537 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10538 ret, "q", "", "er", 8);
10539 return ret;
10540 case 10:
10541 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10542 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10543 ret, "q", "", "er", 10);
10544 if (unlikely(ret))
10545 return ret;
10546 asm("":::"memory");
10547 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10548 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10549 ret, "w", "w", "ir", 2);
10550 return ret;
10551 case 16:
10552 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10553 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10554 ret, "q", "", "er", 16);
10555 if (unlikely(ret))
10556 return ret;
10557 asm("":::"memory");
10558 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10559 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10560 ret, "q", "", "er", 8);
10561 return ret;
10562 default:
10563 +
10564 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10565 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10566 + dst += PAX_USER_SHADOW_BASE;
10567 +#endif
10568 +
10569 return copy_user_generic((__force void *)dst, src, size);
10570 }
10571 }
10572
10573 static __always_inline __must_check
10574 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10575 +unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
10576 +{
10577 + if (access_ok(VERIFY_WRITE, to, len))
10578 + len = __copy_to_user(to, from, len);
10579 + return len;
10580 +}
10581 +
10582 +static __always_inline __must_check
10583 +unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
10584 +{
10585 + if ((int)len < 0)
10586 + return len;
10587 +
10588 + if (access_ok(VERIFY_READ, from, len))
10589 + len = __copy_from_user(to, from, len);
10590 + else if ((int)len > 0) {
10591 + if (!__builtin_constant_p(len))
10592 + check_object_size(to, len, false);
10593 + memset(to, 0, len);
10594 + }
10595 + return len;
10596 +}
10597 +
10598 +static __always_inline __must_check
10599 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10600 {
10601 - int ret = 0;
10602 + unsigned ret = 0;
10603
10604 might_fault();
10605 - if (!__builtin_constant_p(size))
10606 +
10607 + pax_track_stack();
10608 +
10609 + if ((int)size < 0)
10610 + return size;
10611 +
10612 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10613 + if (!__access_ok(VERIFY_READ, src, size))
10614 + return size;
10615 + if (!__access_ok(VERIFY_WRITE, dst, size))
10616 + return size;
10617 +#endif
10618 +
10619 + if (!__builtin_constant_p(size)) {
10620 +
10621 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10622 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10623 + src += PAX_USER_SHADOW_BASE;
10624 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10625 + dst += PAX_USER_SHADOW_BASE;
10626 +#endif
10627 +
10628 return copy_user_generic((__force void *)dst,
10629 - (__force void *)src, size);
10630 + (__force const void *)src, size);
10631 + }
10632 switch (size) {
10633 case 1: {
10634 u8 tmp;
10635 - __get_user_asm(tmp, (u8 __user *)src,
10636 + __get_user_asm(tmp, (const u8 __user *)src,
10637 ret, "b", "b", "=q", 1);
10638 if (likely(!ret))
10639 __put_user_asm(tmp, (u8 __user *)dst,
10640 @@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
10641 }
10642 case 2: {
10643 u16 tmp;
10644 - __get_user_asm(tmp, (u16 __user *)src,
10645 + __get_user_asm(tmp, (const u16 __user *)src,
10646 ret, "w", "w", "=r", 2);
10647 if (likely(!ret))
10648 __put_user_asm(tmp, (u16 __user *)dst,
10649 @@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
10650
10651 case 4: {
10652 u32 tmp;
10653 - __get_user_asm(tmp, (u32 __user *)src,
10654 + __get_user_asm(tmp, (const u32 __user *)src,
10655 ret, "l", "k", "=r", 4);
10656 if (likely(!ret))
10657 __put_user_asm(tmp, (u32 __user *)dst,
10658 @@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
10659 }
10660 case 8: {
10661 u64 tmp;
10662 - __get_user_asm(tmp, (u64 __user *)src,
10663 + __get_user_asm(tmp, (const u64 __user *)src,
10664 ret, "q", "", "=r", 8);
10665 if (likely(!ret))
10666 __put_user_asm(tmp, (u64 __user *)dst,
10667 @@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
10668 return ret;
10669 }
10670 default:
10671 +
10672 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10673 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10674 + src += PAX_USER_SHADOW_BASE;
10675 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10676 + dst += PAX_USER_SHADOW_BASE;
10677 +#endif
10678 +
10679 return copy_user_generic((__force void *)dst,
10680 - (__force void *)src, size);
10681 + (__force const void *)src, size);
10682 }
10683 }
10684
10685 @@ -176,33 +277,75 @@ __must_check long strlen_user(const char
10686 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
10687 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
10688
10689 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
10690 - unsigned size);
10691 +static __must_check __always_inline unsigned long
10692 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
10693 +{
10694 + pax_track_stack();
10695 +
10696 + if ((int)size < 0)
10697 + return size;
10698
10699 -static __must_check __always_inline int
10700 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10701 + if (!__access_ok(VERIFY_READ, src, size))
10702 + return size;
10703 +
10704 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10705 + src += PAX_USER_SHADOW_BASE;
10706 +#endif
10707 +
10708 + return copy_user_generic(dst, (__force const void *)src, size);
10709 +}
10710 +
10711 +static __must_check __always_inline unsigned long
10712 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
10713 {
10714 + if ((int)size < 0)
10715 + return size;
10716 +
10717 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10718 + if (!__access_ok(VERIFY_WRITE, dst, size))
10719 + return size;
10720 +
10721 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10722 + dst += PAX_USER_SHADOW_BASE;
10723 +#endif
10724 +
10725 return copy_user_generic((__force void *)dst, src, size);
10726 }
10727
10728 -extern long __copy_user_nocache(void *dst, const void __user *src,
10729 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
10730 unsigned size, int zerorest);
10731
10732 -static inline int
10733 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10734 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10735 {
10736 might_sleep();
10737 +
10738 + if ((int)size < 0)
10739 + return size;
10740 +
10741 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10742 + if (!__access_ok(VERIFY_READ, src, size))
10743 + return size;
10744 +#endif
10745 +
10746 return __copy_user_nocache(dst, src, size, 1);
10747 }
10748
10749 -static inline int
10750 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10751 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10752 unsigned size)
10753 {
10754 + if ((int)size < 0)
10755 + return size;
10756 +
10757 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10758 + if (!__access_ok(VERIFY_READ, src, size))
10759 + return size;
10760 +#endif
10761 +
10762 return __copy_user_nocache(dst, src, size, 0);
10763 }
10764
10765 -unsigned long
10766 +extern unsigned long
10767 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
10768
10769 #endif /* _ASM_X86_UACCESS_64_H */
10770 diff -urNp linux-2.6.32.42/arch/x86/include/asm/uaccess.h linux-2.6.32.42/arch/x86/include/asm/uaccess.h
10771 --- linux-2.6.32.42/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
10772 +++ linux-2.6.32.42/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
10773 @@ -8,12 +8,15 @@
10774 #include <linux/thread_info.h>
10775 #include <linux/prefetch.h>
10776 #include <linux/string.h>
10777 +#include <linux/sched.h>
10778 #include <asm/asm.h>
10779 #include <asm/page.h>
10780
10781 #define VERIFY_READ 0
10782 #define VERIFY_WRITE 1
10783
10784 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
10785 +
10786 /*
10787 * The fs value determines whether argument validity checking should be
10788 * performed or not. If get_fs() == USER_DS, checking is performed, with
10789 @@ -29,7 +32,12 @@
10790
10791 #define get_ds() (KERNEL_DS)
10792 #define get_fs() (current_thread_info()->addr_limit)
10793 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10794 +void __set_fs(mm_segment_t x);
10795 +void set_fs(mm_segment_t x);
10796 +#else
10797 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10798 +#endif
10799
10800 #define segment_eq(a, b) ((a).seg == (b).seg)
10801
10802 @@ -77,7 +85,33 @@
10803 * checks that the pointer is in the user space range - after calling
10804 * this function, memory access functions may still return -EFAULT.
10805 */
10806 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10807 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10808 +#define access_ok(type, addr, size) \
10809 +({ \
10810 + long __size = size; \
10811 + unsigned long __addr = (unsigned long)addr; \
10812 + unsigned long __addr_ao = __addr & PAGE_MASK; \
10813 + unsigned long __end_ao = __addr + __size - 1; \
10814 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10815 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10816 + while(__addr_ao <= __end_ao) { \
10817 + char __c_ao; \
10818 + __addr_ao += PAGE_SIZE; \
10819 + if (__size > PAGE_SIZE) \
10820 + cond_resched(); \
10821 + if (__get_user(__c_ao, (char __user *)__addr)) \
10822 + break; \
10823 + if (type != VERIFY_WRITE) { \
10824 + __addr = __addr_ao; \
10825 + continue; \
10826 + } \
10827 + if (__put_user(__c_ao, (char __user *)__addr)) \
10828 + break; \
10829 + __addr = __addr_ao; \
10830 + } \
10831 + } \
10832 + __ret_ao; \
10833 +})
10834
10835 /*
10836 * The exception table consists of pairs of addresses: the first is the
10837 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
10838 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10839 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10840
10841 -
10842 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10843 +#define __copyuser_seg "gs;"
10844 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10845 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10846 +#else
10847 +#define __copyuser_seg
10848 +#define __COPYUSER_SET_ES
10849 +#define __COPYUSER_RESTORE_ES
10850 +#endif
10851
10852 #ifdef CONFIG_X86_32
10853 #define __put_user_asm_u64(x, addr, err, errret) \
10854 - asm volatile("1: movl %%eax,0(%2)\n" \
10855 - "2: movl %%edx,4(%2)\n" \
10856 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10857 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10858 "3:\n" \
10859 ".section .fixup,\"ax\"\n" \
10860 "4: movl %3,%0\n" \
10861 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
10862 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10863
10864 #define __put_user_asm_ex_u64(x, addr) \
10865 - asm volatile("1: movl %%eax,0(%1)\n" \
10866 - "2: movl %%edx,4(%1)\n" \
10867 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10868 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10869 "3:\n" \
10870 _ASM_EXTABLE(1b, 2b - 1b) \
10871 _ASM_EXTABLE(2b, 3b - 2b) \
10872 @@ -374,7 +416,7 @@ do { \
10873 } while (0)
10874
10875 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10876 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10877 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10878 "2:\n" \
10879 ".section .fixup,\"ax\"\n" \
10880 "3: mov %3,%0\n" \
10881 @@ -382,7 +424,7 @@ do { \
10882 " jmp 2b\n" \
10883 ".previous\n" \
10884 _ASM_EXTABLE(1b, 3b) \
10885 - : "=r" (err), ltype(x) \
10886 + : "=r" (err), ltype (x) \
10887 : "m" (__m(addr)), "i" (errret), "0" (err))
10888
10889 #define __get_user_size_ex(x, ptr, size) \
10890 @@ -407,7 +449,7 @@ do { \
10891 } while (0)
10892
10893 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10894 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10895 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10896 "2:\n" \
10897 _ASM_EXTABLE(1b, 2b - 1b) \
10898 : ltype(x) : "m" (__m(addr)))
10899 @@ -424,13 +466,24 @@ do { \
10900 int __gu_err; \
10901 unsigned long __gu_val; \
10902 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10903 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
10904 + (x) = (__typeof__(*(ptr)))__gu_val; \
10905 __gu_err; \
10906 })
10907
10908 /* FIXME: this hack is definitely wrong -AK */
10909 struct __large_struct { unsigned long buf[100]; };
10910 -#define __m(x) (*(struct __large_struct __user *)(x))
10911 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10912 +#define ____m(x) \
10913 +({ \
10914 + unsigned long ____x = (unsigned long)(x); \
10915 + if (____x < PAX_USER_SHADOW_BASE) \
10916 + ____x += PAX_USER_SHADOW_BASE; \
10917 + (void __user *)____x; \
10918 +})
10919 +#else
10920 +#define ____m(x) (x)
10921 +#endif
10922 +#define __m(x) (*(struct __large_struct __user *)____m(x))
10923
10924 /*
10925 * Tell gcc we read from memory instead of writing: this is because
10926 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
10927 * aliasing issues.
10928 */
10929 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10930 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10931 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10932 "2:\n" \
10933 ".section .fixup,\"ax\"\n" \
10934 "3: mov %3,%0\n" \
10935 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
10936 ".previous\n" \
10937 _ASM_EXTABLE(1b, 3b) \
10938 : "=r"(err) \
10939 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10940 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10941
10942 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10943 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10944 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10945 "2:\n" \
10946 _ASM_EXTABLE(1b, 2b - 1b) \
10947 : : ltype(x), "m" (__m(addr)))
10948 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
10949 * On error, the variable @x is set to zero.
10950 */
10951
10952 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10953 +#define __get_user(x, ptr) get_user((x), (ptr))
10954 +#else
10955 #define __get_user(x, ptr) \
10956 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10957 +#endif
10958
10959 /**
10960 * __put_user: - Write a simple value into user space, with less checking.
10961 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
10962 * Returns zero on success, or -EFAULT on error.
10963 */
10964
10965 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10966 +#define __put_user(x, ptr) put_user((x), (ptr))
10967 +#else
10968 #define __put_user(x, ptr) \
10969 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10970 +#endif
10971
10972 #define __get_user_unaligned __get_user
10973 #define __put_user_unaligned __put_user
10974 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
10975 #define get_user_ex(x, ptr) do { \
10976 unsigned long __gue_val; \
10977 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10978 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
10979 + (x) = (__typeof__(*(ptr)))__gue_val; \
10980 } while (0)
10981
10982 #ifdef CONFIG_X86_WP_WORKS_OK
10983 @@ -567,6 +628,7 @@ extern struct movsl_mask {
10984
10985 #define ARCH_HAS_NOCACHE_UACCESS 1
10986
10987 +#define ARCH_HAS_SORT_EXTABLE
10988 #ifdef CONFIG_X86_32
10989 # include "uaccess_32.h"
10990 #else
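For orientation, the UDEREF pointer handling added to uaccess.h above (the ____m() macro and the __get_user/__put_user redirects) reduces to the sketch below. The helper name is invented for illustration; PAX_USER_SHADOW_BASE is provided by other parts of this patch.

	/* Hypothetical reduction of ____m(): on amd64 with PAX_MEMORY_UDEREF, a
	 * userland pointer is shifted into the user shadow area before the
	 * kernel dereferences it, so a stray access through a raw user address
	 * faults instead of silently touching user memory. */
	static inline void __user *shadowed_user_ptr(const void __user *ptr)
	{
		unsigned long addr = (unsigned long)ptr;

		if (addr < PAX_USER_SHADOW_BASE)
			addr += PAX_USER_SHADOW_BASE;
		return (void __user *)addr;
	}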
10991 diff -urNp linux-2.6.32.42/arch/x86/include/asm/vgtod.h linux-2.6.32.42/arch/x86/include/asm/vgtod.h
10992 --- linux-2.6.32.42/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
10993 +++ linux-2.6.32.42/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
10994 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
10995 int sysctl_enabled;
10996 struct timezone sys_tz;
10997 struct { /* extract of a clocksource struct */
10998 + char name[8];
10999 cycle_t (*vread)(void);
11000 cycle_t cycle_last;
11001 cycle_t mask;
11002 diff -urNp linux-2.6.32.42/arch/x86/include/asm/vmi.h linux-2.6.32.42/arch/x86/include/asm/vmi.h
11003 --- linux-2.6.32.42/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11004 +++ linux-2.6.32.42/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11005 @@ -191,6 +191,7 @@ struct vrom_header {
11006 u8 reserved[96]; /* Reserved for headers */
11007 char vmi_init[8]; /* VMI_Init jump point */
11008 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11009 + char rom_data[8048]; /* rest of the option ROM */
11010 } __attribute__((packed));
11011
11012 struct pnp_header {
11013 diff -urNp linux-2.6.32.42/arch/x86/include/asm/vsyscall.h linux-2.6.32.42/arch/x86/include/asm/vsyscall.h
11014 --- linux-2.6.32.42/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11015 +++ linux-2.6.32.42/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11016 @@ -15,9 +15,10 @@ enum vsyscall_num {
11017
11018 #ifdef __KERNEL__
11019 #include <linux/seqlock.h>
11020 +#include <linux/getcpu.h>
11021 +#include <linux/time.h>
11022
11023 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11024 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11025
11026 /* Definitions for CONFIG_GENERIC_TIME definitions */
11027 #define __section_vsyscall_gtod_data __attribute__ \
11028 @@ -31,7 +32,6 @@ enum vsyscall_num {
11029 #define VGETCPU_LSL 2
11030
11031 extern int __vgetcpu_mode;
11032 -extern volatile unsigned long __jiffies;
11033
11034 /* kernel space (writeable) */
11035 extern int vgetcpu_mode;
11036 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11037
11038 extern void map_vsyscall(void);
11039
11040 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11041 +extern time_t vtime(time_t *t);
11042 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11043 #endif /* __KERNEL__ */
11044
11045 #endif /* _ASM_X86_VSYSCALL_H */
11046 diff -urNp linux-2.6.32.42/arch/x86/include/asm/xsave.h linux-2.6.32.42/arch/x86/include/asm/xsave.h
11047 --- linux-2.6.32.42/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11048 +++ linux-2.6.32.42/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11049 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11050 static inline int xsave_user(struct xsave_struct __user *buf)
11051 {
11052 int err;
11053 +
11054 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11055 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11056 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11057 +#endif
11058 +
11059 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11060 "2:\n"
11061 ".section .fixup,\"ax\"\n"
11062 @@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11063 u32 lmask = mask;
11064 u32 hmask = mask >> 32;
11065
11066 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11067 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11068 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11069 +#endif
11070 +
11071 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11072 "2:\n"
11073 ".section .fixup,\"ax\"\n"
11074 diff -urNp linux-2.6.32.42/arch/x86/Kconfig linux-2.6.32.42/arch/x86/Kconfig
11075 --- linux-2.6.32.42/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11076 +++ linux-2.6.32.42/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11077 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11078
11079 config X86_32_LAZY_GS
11080 def_bool y
11081 - depends on X86_32 && !CC_STACKPROTECTOR
11082 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11083
11084 config KTIME_SCALAR
11085 def_bool X86_32
11086 @@ -1008,7 +1008,7 @@ choice
11087
11088 config NOHIGHMEM
11089 bool "off"
11090 - depends on !X86_NUMAQ
11091 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11092 ---help---
11093 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11094 However, the address space of 32-bit x86 processors is only 4
11095 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
11096
11097 config HIGHMEM4G
11098 bool "4GB"
11099 - depends on !X86_NUMAQ
11100 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11101 ---help---
11102 Select this if you have a 32-bit processor and between 1 and 4
11103 gigabytes of physical RAM.
11104 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11105 hex
11106 default 0xB0000000 if VMSPLIT_3G_OPT
11107 default 0x80000000 if VMSPLIT_2G
11108 - default 0x78000000 if VMSPLIT_2G_OPT
11109 + default 0x70000000 if VMSPLIT_2G_OPT
11110 default 0x40000000 if VMSPLIT_1G
11111 default 0xC0000000
11112 depends on X86_32
11113 @@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11114
11115 config EFI
11116 bool "EFI runtime service support"
11117 - depends on ACPI
11118 + depends on ACPI && !PAX_KERNEXEC
11119 ---help---
11120 This enables the kernel to use EFI runtime services that are
11121 available (such as the EFI variable services).
11122 @@ -1460,6 +1460,7 @@ config SECCOMP
11123
11124 config CC_STACKPROTECTOR
11125 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11126 + depends on X86_64 || !PAX_MEMORY_UDEREF
11127 ---help---
11128 This option turns on the -fstack-protector GCC feature. This
11129 feature puts, at the beginning of functions, a canary value on
11130 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11131 config PHYSICAL_START
11132 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11133 default "0x1000000"
11134 + range 0x400000 0x40000000
11135 ---help---
11136 This gives the physical address where the kernel is loaded.
11137
11138 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11139 hex
11140 prompt "Alignment value to which kernel should be aligned" if X86_32
11141 default "0x1000000"
11142 + range 0x400000 0x1000000 if PAX_KERNEXEC
11143 range 0x2000 0x1000000
11144 ---help---
11145 This value puts the alignment restrictions on physical address
11146 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11147 Say N if you want to disable CPU hotplug.
11148
11149 config COMPAT_VDSO
11150 - def_bool y
11151 + def_bool n
11152 prompt "Compat VDSO support"
11153 depends on X86_32 || IA32_EMULATION
11154 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11155 ---help---
11156 Map the 32-bit VDSO to the predictable old-style address too.
11157 ---help---
11158 diff -urNp linux-2.6.32.42/arch/x86/Kconfig.cpu linux-2.6.32.42/arch/x86/Kconfig.cpu
11159 --- linux-2.6.32.42/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11160 +++ linux-2.6.32.42/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11161 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11162
11163 config X86_F00F_BUG
11164 def_bool y
11165 - depends on M586MMX || M586TSC || M586 || M486 || M386
11166 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11167
11168 config X86_WP_WORKS_OK
11169 def_bool y
11170 @@ -360,7 +360,7 @@ config X86_POPAD_OK
11171
11172 config X86_ALIGNMENT_16
11173 def_bool y
11174 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11175 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11176
11177 config X86_INTEL_USERCOPY
11178 def_bool y
11179 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
11180 # generates cmov.
11181 config X86_CMOV
11182 def_bool y
11183 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11184 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11185
11186 config X86_MINIMUM_CPU_FAMILY
11187 int
11188 diff -urNp linux-2.6.32.42/arch/x86/Kconfig.debug linux-2.6.32.42/arch/x86/Kconfig.debug
11189 --- linux-2.6.32.42/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11190 +++ linux-2.6.32.42/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11191 @@ -99,7 +99,7 @@ config X86_PTDUMP
11192 config DEBUG_RODATA
11193 bool "Write protect kernel read-only data structures"
11194 default y
11195 - depends on DEBUG_KERNEL
11196 + depends on DEBUG_KERNEL && BROKEN
11197 ---help---
11198 Mark the kernel read-only data as write-protected in the pagetables,
11199 in order to catch accidental (and incorrect) writes to such const
11200 diff -urNp linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S
11201 --- linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11202 +++ linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S 2011-04-17 15:56:46.000000000 -0400
11203 @@ -104,7 +104,7 @@ _start:
11204 movl %eax, %ecx
11205 orl %edx, %ecx
11206 jz 1f
11207 - movl $0xc0000080, %ecx
11208 + mov $MSR_EFER, %ecx
11209 wrmsr
11210 1:
11211
11212 diff -urNp linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c
11213 --- linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11214 +++ linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
11215 @@ -11,11 +11,12 @@
11216 #include <linux/cpumask.h>
11217 #include <asm/segment.h>
11218 #include <asm/desc.h>
11219 +#include <asm/e820.h>
11220
11221 #include "realmode/wakeup.h"
11222 #include "sleep.h"
11223
11224 -unsigned long acpi_wakeup_address;
11225 +unsigned long acpi_wakeup_address = 0x2000;
11226 unsigned long acpi_realmode_flags;
11227
11228 /* address in low memory of the wakeup routine. */
11229 @@ -99,8 +100,12 @@ int acpi_save_state_mem(void)
11230 header->trampoline_segment = setup_trampoline() >> 4;
11231 #ifdef CONFIG_SMP
11232 stack_start.sp = temp_stack + sizeof(temp_stack);
11233 +
11234 + pax_open_kernel();
11235 early_gdt_descr.address =
11236 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11237 + pax_close_kernel();
11238 +
11239 initial_gs = per_cpu_offset(smp_processor_id());
11240 #endif
11241 initial_code = (unsigned long)wakeup_long64;
11242 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11243 return;
11244 }
11245
11246 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11247 -
11248 - if (!acpi_realmode) {
11249 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11250 - return;
11251 - }
11252 -
11253 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11254 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11255 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
11256 }
11257
11258
11259 diff -urNp linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S
11260 --- linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11261 +++ linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11262 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11263 # and restore the stack ... but you need gdt for this to work
11264 movl saved_context_esp, %esp
11265
11266 - movl %cs:saved_magic, %eax
11267 - cmpl $0x12345678, %eax
11268 + cmpl $0x12345678, saved_magic
11269 jne bogus_magic
11270
11271 # jump to place where we left off
11272 - movl saved_eip, %eax
11273 - jmp *%eax
11274 + jmp *(saved_eip)
11275
11276 bogus_magic:
11277 jmp bogus_magic
11278 diff -urNp linux-2.6.32.42/arch/x86/kernel/alternative.c linux-2.6.32.42/arch/x86/kernel/alternative.c
11279 --- linux-2.6.32.42/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11280 +++ linux-2.6.32.42/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11281 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11282
11283 BUG_ON(p->len > MAX_PATCH_LEN);
11284 /* prep the buffer with the original instructions */
11285 - memcpy(insnbuf, p->instr, p->len);
11286 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11287 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11288 (unsigned long)p->instr, p->len);
11289
11290 @@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11291 if (smp_alt_once)
11292 free_init_pages("SMP alternatives",
11293 (unsigned long)__smp_locks,
11294 - (unsigned long)__smp_locks_end);
11295 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11296
11297 restart_nmi();
11298 }
11299 @@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11300 * instructions. And on the local CPU you need to be protected again NMI or MCE
11301 * handlers seeing an inconsistent instruction while you patch.
11302 */
11303 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11304 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
11305 size_t len)
11306 {
11307 unsigned long flags;
11308 local_irq_save(flags);
11309 - memcpy(addr, opcode, len);
11310 +
11311 + pax_open_kernel();
11312 + memcpy(ktla_ktva(addr), opcode, len);
11313 sync_core();
11314 + pax_close_kernel();
11315 +
11316 local_irq_restore(flags);
11317 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11318 that causes hangs on some VIA CPUs. */
11319 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11320 */
11321 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11322 {
11323 - unsigned long flags;
11324 - char *vaddr;
11325 + unsigned char *vaddr = ktla_ktva(addr);
11326 struct page *pages[2];
11327 - int i;
11328 + size_t i;
11329
11330 if (!core_kernel_text((unsigned long)addr)) {
11331 - pages[0] = vmalloc_to_page(addr);
11332 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11333 + pages[0] = vmalloc_to_page(vaddr);
11334 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11335 } else {
11336 - pages[0] = virt_to_page(addr);
11337 + pages[0] = virt_to_page(vaddr);
11338 WARN_ON(!PageReserved(pages[0]));
11339 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11340 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11341 }
11342 BUG_ON(!pages[0]);
11343 - local_irq_save(flags);
11344 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11345 - if (pages[1])
11346 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11347 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11348 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11349 - clear_fixmap(FIX_TEXT_POKE0);
11350 - if (pages[1])
11351 - clear_fixmap(FIX_TEXT_POKE1);
11352 - local_flush_tlb();
11353 - sync_core();
11354 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11355 - that causes hangs on some VIA CPUs. */
11356 + text_poke_early(addr, opcode, len);
11357 for (i = 0; i < len; i++)
11358 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11359 - local_irq_restore(flags);
11360 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11361 return addr;
11362 }
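As a rough usage sketch, the reworked text_poke()/text_poke_early() path above boils down to the pattern below; pax_open_kernel(), pax_close_kernel() and ktla_ktva() come from other parts of this patch, and the wrapper name is invented.

	/* Sketch only: with KERNEXEC the kernel image is not writable at its
	 * executing address, so instruction patches are written through the
	 * alias returned by ktla_ktva() while write protection is briefly
	 * lifted. */
	static void poke_kernel_text(void *addr, const void *opcode, size_t len)
	{
		pax_open_kernel();
		memcpy(ktla_ktva(addr), opcode, len);
		sync_core();
		pax_close_kernel();
	}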
11363 diff -urNp linux-2.6.32.42/arch/x86/kernel/amd_iommu.c linux-2.6.32.42/arch/x86/kernel/amd_iommu.c
11364 --- linux-2.6.32.42/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11365 +++ linux-2.6.32.42/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11366 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11367 }
11368 }
11369
11370 -static struct dma_map_ops amd_iommu_dma_ops = {
11371 +static const struct dma_map_ops amd_iommu_dma_ops = {
11372 .alloc_coherent = alloc_coherent,
11373 .free_coherent = free_coherent,
11374 .map_page = map_page,
11375 diff -urNp linux-2.6.32.42/arch/x86/kernel/apic/apic.c linux-2.6.32.42/arch/x86/kernel/apic/apic.c
11376 --- linux-2.6.32.42/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11377 +++ linux-2.6.32.42/arch/x86/kernel/apic/apic.c 2011-05-16 21:46:57.000000000 -0400
11378 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11379 apic_write(APIC_ESR, 0);
11380 v1 = apic_read(APIC_ESR);
11381 ack_APIC_irq();
11382 - atomic_inc(&irq_err_count);
11383 + atomic_inc_unchecked(&irq_err_count);
11384
11385 /*
11386 * Here is what the APIC error bits mean:
11387 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11388 u16 *bios_cpu_apicid;
11389 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11390
11391 + pax_track_stack();
11392 +
11393 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11394 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11395
11396 diff -urNp linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c
11397 --- linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11398 +++ linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11399 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11400 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11401 GFP_ATOMIC);
11402 if (!ioapic_entries)
11403 - return 0;
11404 + return NULL;
11405
11406 for (apic = 0; apic < nr_ioapics; apic++) {
11407 ioapic_entries[apic] =
11408 @@ -733,7 +733,7 @@ nomem:
11409 kfree(ioapic_entries[apic]);
11410 kfree(ioapic_entries);
11411
11412 - return 0;
11413 + return NULL;
11414 }
11415
11416 /*
11417 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11418 }
11419 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11420
11421 -void lock_vector_lock(void)
11422 +void lock_vector_lock(void) __acquires(vector_lock)
11423 {
11424 /* Used to the online set of cpus does not change
11425 * during assign_irq_vector.
11426 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11427 spin_lock(&vector_lock);
11428 }
11429
11430 -void unlock_vector_lock(void)
11431 +void unlock_vector_lock(void) __releases(vector_lock)
11432 {
11433 spin_unlock(&vector_lock);
11434 }
11435 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
11436 ack_APIC_irq();
11437 }
11438
11439 -atomic_t irq_mis_count;
11440 +atomic_unchecked_t irq_mis_count;
11441
11442 static void ack_apic_level(unsigned int irq)
11443 {
11444 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
11445
11446 /* Tail end of version 0x11 I/O APIC bug workaround */
11447 if (!(v & (1 << (i & 0x1f)))) {
11448 - atomic_inc(&irq_mis_count);
11449 + atomic_inc_unchecked(&irq_mis_count);
11450 spin_lock(&ioapic_lock);
11451 __mask_and_edge_IO_APIC_irq(cfg);
11452 __unmask_and_level_IO_APIC_irq(cfg);
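The __acquires()/__releases() additions to lock_vector_lock()/unlock_vector_lock() above are sparse lock-context annotations rather than functional changes; a minimal stand-alone sketch (lock and function names invented, usual kernel headers assumed):

	static DEFINE_SPINLOCK(demo_lock);

	/* Tells sparse the function returns with demo_lock held. */
	void demo_lock_acquire(void) __acquires(demo_lock)
	{
		spin_lock(&demo_lock);
	}

	/* Tells sparse the function releases demo_lock before returning. */
	void demo_lock_release(void) __releases(demo_lock)
	{
		spin_unlock(&demo_lock);
	}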
11453 diff -urNp linux-2.6.32.42/arch/x86/kernel/apm_32.c linux-2.6.32.42/arch/x86/kernel/apm_32.c
11454 --- linux-2.6.32.42/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
11455 +++ linux-2.6.32.42/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
11456 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
11457 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11458 * even though they are called in protected mode.
11459 */
11460 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11461 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11462 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11463
11464 static const char driver_version[] = "1.16ac"; /* no spaces */
11465 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
11466 BUG_ON(cpu != 0);
11467 gdt = get_cpu_gdt_table(cpu);
11468 save_desc_40 = gdt[0x40 / 8];
11469 +
11470 + pax_open_kernel();
11471 gdt[0x40 / 8] = bad_bios_desc;
11472 + pax_close_kernel();
11473
11474 apm_irq_save(flags);
11475 APM_DO_SAVE_SEGS;
11476 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
11477 &call->esi);
11478 APM_DO_RESTORE_SEGS;
11479 apm_irq_restore(flags);
11480 +
11481 + pax_open_kernel();
11482 gdt[0x40 / 8] = save_desc_40;
11483 + pax_close_kernel();
11484 +
11485 put_cpu();
11486
11487 return call->eax & 0xff;
11488 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
11489 BUG_ON(cpu != 0);
11490 gdt = get_cpu_gdt_table(cpu);
11491 save_desc_40 = gdt[0x40 / 8];
11492 +
11493 + pax_open_kernel();
11494 gdt[0x40 / 8] = bad_bios_desc;
11495 + pax_close_kernel();
11496
11497 apm_irq_save(flags);
11498 APM_DO_SAVE_SEGS;
11499 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
11500 &call->eax);
11501 APM_DO_RESTORE_SEGS;
11502 apm_irq_restore(flags);
11503 +
11504 + pax_open_kernel();
11505 gdt[0x40 / 8] = save_desc_40;
11506 + pax_close_kernel();
11507 +
11508 put_cpu();
11509 return error;
11510 }
11511 @@ -975,7 +989,7 @@ recalc:
11512
11513 static void apm_power_off(void)
11514 {
11515 - unsigned char po_bios_call[] = {
11516 + const unsigned char po_bios_call[] = {
11517 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
11518 0x8e, 0xd0, /* movw ax,ss */
11519 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
11520 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
11521 * code to that CPU.
11522 */
11523 gdt = get_cpu_gdt_table(0);
11524 +
11525 + pax_open_kernel();
11526 set_desc_base(&gdt[APM_CS >> 3],
11527 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11528 set_desc_base(&gdt[APM_CS_16 >> 3],
11529 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11530 set_desc_base(&gdt[APM_DS >> 3],
11531 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11532 + pax_close_kernel();
11533
11534 proc_create("apm", 0, NULL, &apm_file_ops);
11535
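The pax_open_kernel()/pax_close_kernel() brackets added around the GDT writes in apm_32.c above all follow one pattern, sketched here with an invented helper; the descriptor value is a placeholder.

	/* Sketch only: under KERNEXEC the per-CPU GDT lives in read-only
	 * memory, so every direct descriptor update is wrapped in open/close
	 * calls that temporarily re-enable kernel writes. */
	static void write_gdt_entry_rw(int cpu, unsigned int idx, struct desc_struct d)
	{
		struct desc_struct *gdt = get_cpu_gdt_table(cpu);

		pax_open_kernel();
		gdt[idx] = d;
		pax_close_kernel();
	}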
11536 diff -urNp linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c
11537 --- linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
11538 +++ linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
11539 @@ -51,7 +51,6 @@ void foo(void)
11540 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
11541 BLANK();
11542
11543 - OFFSET(TI_task, thread_info, task);
11544 OFFSET(TI_exec_domain, thread_info, exec_domain);
11545 OFFSET(TI_flags, thread_info, flags);
11546 OFFSET(TI_status, thread_info, status);
11547 @@ -60,6 +59,8 @@ void foo(void)
11548 OFFSET(TI_restart_block, thread_info, restart_block);
11549 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
11550 OFFSET(TI_cpu, thread_info, cpu);
11551 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11552 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11553 BLANK();
11554
11555 OFFSET(GDS_size, desc_ptr, size);
11556 @@ -99,6 +100,7 @@ void foo(void)
11557
11558 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11559 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11560 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11561 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
11562 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
11563 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
11564 @@ -115,6 +117,11 @@ void foo(void)
11565 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
11566 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11567 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11568 +
11569 +#ifdef CONFIG_PAX_KERNEXEC
11570 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11571 +#endif
11572 +
11573 #endif
11574
11575 #ifdef CONFIG_XEN
11576 diff -urNp linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c
11577 --- linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
11578 +++ linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
11579 @@ -44,6 +44,8 @@ int main(void)
11580 ENTRY(addr_limit);
11581 ENTRY(preempt_count);
11582 ENTRY(status);
11583 + ENTRY(lowest_stack);
11584 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11585 #ifdef CONFIG_IA32_EMULATION
11586 ENTRY(sysenter_return);
11587 #endif
11588 @@ -63,6 +65,18 @@ int main(void)
11589 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11590 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
11591 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11592 +
11593 +#ifdef CONFIG_PAX_KERNEXEC
11594 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11595 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11596 +#endif
11597 +
11598 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11599 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11600 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11601 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
11602 +#endif
11603 +
11604 #endif
11605
11606
11607 @@ -115,6 +129,7 @@ int main(void)
11608 ENTRY(cr8);
11609 BLANK();
11610 #undef ENTRY
11611 + DEFINE(TSS_size, sizeof(struct tss_struct));
11612 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
11613 BLANK();
11614 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
11615 @@ -130,6 +145,7 @@ int main(void)
11616
11617 BLANK();
11618 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11619 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11620 #ifdef CONFIG_XEN
11621 BLANK();
11622 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11623 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/amd.c linux-2.6.32.42/arch/x86/kernel/cpu/amd.c
11624 --- linux-2.6.32.42/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
11625 +++ linux-2.6.32.42/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
11626 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
11627 unsigned int size)
11628 {
11629 /* AMD errata T13 (order #21922) */
11630 - if ((c->x86 == 6)) {
11631 + if (c->x86 == 6) {
11632 /* Duron Rev A0 */
11633 if (c->x86_model == 3 && c->x86_mask == 0)
11634 size = 64;
11635 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/common.c linux-2.6.32.42/arch/x86/kernel/cpu/common.c
11636 --- linux-2.6.32.42/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
11637 +++ linux-2.6.32.42/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
11638 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
11639
11640 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11641
11642 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11643 -#ifdef CONFIG_X86_64
11644 - /*
11645 - * We need valid kernel segments for data and code in long mode too
11646 - * IRET will check the segment types kkeil 2000/10/28
11647 - * Also sysret mandates a special GDT layout
11648 - *
11649 - * TLS descriptors are currently at a different place compared to i386.
11650 - * Hopefully nobody expects them at a fixed place (Wine?)
11651 - */
11652 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11653 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11654 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11655 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11656 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11657 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11658 -#else
11659 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11660 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11661 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11662 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11663 - /*
11664 - * Segments used for calling PnP BIOS have byte granularity.
11665 - * They code segments and data segments have fixed 64k limits,
11666 - * the transfer segment sizes are set at run time.
11667 - */
11668 - /* 32-bit code */
11669 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11670 - /* 16-bit code */
11671 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11672 - /* 16-bit data */
11673 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11674 - /* 16-bit data */
11675 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11676 - /* 16-bit data */
11677 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11678 - /*
11679 - * The APM segments have byte granularity and their bases
11680 - * are set at run time. All have 64k limits.
11681 - */
11682 - /* 32-bit code */
11683 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11684 - /* 16-bit code */
11685 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11686 - /* data */
11687 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11688 -
11689 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11690 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11691 - GDT_STACK_CANARY_INIT
11692 -#endif
11693 -} };
11694 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11695 -
11696 static int __init x86_xsave_setup(char *s)
11697 {
11698 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
11699 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
11700 {
11701 struct desc_ptr gdt_descr;
11702
11703 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11704 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11705 gdt_descr.size = GDT_SIZE - 1;
11706 load_gdt(&gdt_descr);
11707 /* Reload the per-cpu base */
11708 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
11709 /* Filter out anything that depends on CPUID levels we don't have */
11710 filter_cpuid_features(c, true);
11711
11712 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
11713 + setup_clear_cpu_cap(X86_FEATURE_SEP);
11714 +#endif
11715 +
11716 /* If the model name is still unset, do table lookup. */
11717 if (!c->x86_model_id[0]) {
11718 const char *p;
11719 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
11720 }
11721 __setup("clearcpuid=", setup_disablecpuid);
11722
11723 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11724 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
11725 +
11726 #ifdef CONFIG_X86_64
11727 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11728
11729 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
11730 EXPORT_PER_CPU_SYMBOL(current_task);
11731
11732 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11733 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
11734 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
11735 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11736
11737 DEFINE_PER_CPU(char *, irq_stack_ptr) =
11738 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
11739 {
11740 memset(regs, 0, sizeof(struct pt_regs));
11741 regs->fs = __KERNEL_PERCPU;
11742 - regs->gs = __KERNEL_STACK_CANARY;
11743 + savesegment(gs, regs->gs);
11744
11745 return regs;
11746 }
11747 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
11748 int i;
11749
11750 cpu = stack_smp_processor_id();
11751 - t = &per_cpu(init_tss, cpu);
11752 + t = init_tss + cpu;
11753 orig_ist = &per_cpu(orig_ist, cpu);
11754
11755 #ifdef CONFIG_NUMA
11756 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
11757 switch_to_new_gdt(cpu);
11758 loadsegment(fs, 0);
11759
11760 - load_idt((const struct desc_ptr *)&idt_descr);
11761 + load_idt(&idt_descr);
11762
11763 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
11764 syscall_init();
11765 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
11766 wrmsrl(MSR_KERNEL_GS_BASE, 0);
11767 barrier();
11768
11769 - check_efer();
11770 if (cpu != 0)
11771 enable_x2apic();
11772
11773 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
11774 {
11775 int cpu = smp_processor_id();
11776 struct task_struct *curr = current;
11777 - struct tss_struct *t = &per_cpu(init_tss, cpu);
11778 + struct tss_struct *t = init_tss + cpu;
11779 struct thread_struct *thread = &curr->thread;
11780
11781 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
11782 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/intel.c linux-2.6.32.42/arch/x86/kernel/cpu/intel.c
11783 --- linux-2.6.32.42/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
11784 +++ linux-2.6.32.42/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
11785 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
11786 * Update the IDT descriptor and reload the IDT so that
11787 * it uses the read-only mapped virtual address.
11788 */
11789 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
11790 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
11791 load_idt(&idt_descr);
11792 }
11793 #endif
11794 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c
11795 --- linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
11796 +++ linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
11797 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
11798 return ret;
11799 }
11800
11801 -static struct sysfs_ops sysfs_ops = {
11802 +static const struct sysfs_ops sysfs_ops = {
11803 .show = show,
11804 .store = store,
11805 };
11806 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/Makefile linux-2.6.32.42/arch/x86/kernel/cpu/Makefile
11807 --- linux-2.6.32.42/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
11808 +++ linux-2.6.32.42/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
11809 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
11810 CFLAGS_REMOVE_common.o = -pg
11811 endif
11812
11813 -# Make sure load_percpu_segment has no stackprotector
11814 -nostackp := $(call cc-option, -fno-stack-protector)
11815 -CFLAGS_common.o := $(nostackp)
11816 -
11817 obj-y := intel_cacheinfo.o addon_cpuid_features.o
11818 obj-y += proc.o capflags.o powerflags.o common.o
11819 obj-y += vmware.o hypervisor.o sched.o
11820 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c
11821 --- linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
11822 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
11823 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
11824 return ret;
11825 }
11826
11827 -static struct sysfs_ops threshold_ops = {
11828 +static const struct sysfs_ops threshold_ops = {
11829 .show = show,
11830 .store = store,
11831 };
11832 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c
11833 --- linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
11834 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
11835 @@ -43,6 +43,7 @@
11836 #include <asm/ipi.h>
11837 #include <asm/mce.h>
11838 #include <asm/msr.h>
11839 +#include <asm/local.h>
11840
11841 #include "mce-internal.h"
11842
11843 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
11844 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
11845 m->cs, m->ip);
11846
11847 - if (m->cs == __KERNEL_CS)
11848 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
11849 print_symbol("{%s}", m->ip);
11850 pr_cont("\n");
11851 }
11852 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
11853
11854 #define PANIC_TIMEOUT 5 /* 5 seconds */
11855
11856 -static atomic_t mce_paniced;
11857 +static atomic_unchecked_t mce_paniced;
11858
11859 static int fake_panic;
11860 -static atomic_t mce_fake_paniced;
11861 +static atomic_unchecked_t mce_fake_paniced;
11862
11863 /* Panic in progress. Enable interrupts and wait for final IPI */
11864 static void wait_for_panic(void)
11865 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
11866 /*
11867 * Make sure only one CPU runs in machine check panic
11868 */
11869 - if (atomic_inc_return(&mce_paniced) > 1)
11870 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11871 wait_for_panic();
11872 barrier();
11873
11874 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
11875 console_verbose();
11876 } else {
11877 /* Don't log too much for fake panic */
11878 - if (atomic_inc_return(&mce_fake_paniced) > 1)
11879 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11880 return;
11881 }
11882 print_mce_head();
11883 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
11884 * might have been modified by someone else.
11885 */
11886 rmb();
11887 - if (atomic_read(&mce_paniced))
11888 + if (atomic_read_unchecked(&mce_paniced))
11889 wait_for_panic();
11890 if (!monarch_timeout)
11891 goto out;
11892 @@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
11893 */
11894
11895 static DEFINE_SPINLOCK(mce_state_lock);
11896 -static int open_count; /* #times opened */
11897 +static local_t open_count; /* #times opened */
11898 static int open_exclu; /* already open exclusive? */
11899
11900 static int mce_open(struct inode *inode, struct file *file)
11901 {
11902 spin_lock(&mce_state_lock);
11903
11904 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
11905 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
11906 spin_unlock(&mce_state_lock);
11907
11908 return -EBUSY;
11909 @@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
11910
11911 if (file->f_flags & O_EXCL)
11912 open_exclu = 1;
11913 - open_count++;
11914 + local_inc(&open_count);
11915
11916 spin_unlock(&mce_state_lock);
11917
11918 @@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
11919 {
11920 spin_lock(&mce_state_lock);
11921
11922 - open_count--;
11923 + local_dec(&open_count);
11924 open_exclu = 0;
11925
11926 spin_unlock(&mce_state_lock);
11927 @@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
11928 static void mce_reset(void)
11929 {
11930 cpu_missing = 0;
11931 - atomic_set(&mce_fake_paniced, 0);
11932 + atomic_set_unchecked(&mce_fake_paniced, 0);
11933 atomic_set(&mce_executing, 0);
11934 atomic_set(&mce_callin, 0);
11935 atomic_set(&global_nwo, 0);
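The atomic_*_unchecked() conversions in this file (and in apic.c/io_apic.c earlier) all express the same idea; a small sketch with an invented counter:

	/* Sketch only: with PAX_REFCOUNT, plain atomic_t operations trap on
	 * overflow to catch reference-count bugs; counters that are mere
	 * statistics and may legitimately wrap are moved to the _unchecked
	 * variants instead. */
	static atomic_unchecked_t demo_err_count;

	static void demo_note_error(void)
	{
		atomic_inc_unchecked(&demo_err_count);
	}

	static int demo_read_errors(void)
	{
		return atomic_read_unchecked(&demo_err_count);
	}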
11936 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c
11937 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
11938 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
11939 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
11940 return 0;
11941 }
11942
11943 -static struct mtrr_ops amd_mtrr_ops = {
11944 +static const struct mtrr_ops amd_mtrr_ops = {
11945 .vendor = X86_VENDOR_AMD,
11946 .set = amd_set_mtrr,
11947 .get = amd_get_mtrr,
11948 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c
11949 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
11950 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
11951 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
11952 return 0;
11953 }
11954
11955 -static struct mtrr_ops centaur_mtrr_ops = {
11956 +static const struct mtrr_ops centaur_mtrr_ops = {
11957 .vendor = X86_VENDOR_CENTAUR,
11958 .set = centaur_set_mcr,
11959 .get = centaur_get_mcr,
11960 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c
11961 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
11962 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
11963 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
11964 post_set();
11965 }
11966
11967 -static struct mtrr_ops cyrix_mtrr_ops = {
11968 +static const struct mtrr_ops cyrix_mtrr_ops = {
11969 .vendor = X86_VENDOR_CYRIX,
11970 .set_all = cyrix_set_all,
11971 .set = cyrix_set_arr,
11972 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c
11973 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
11974 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
11975 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
11976 /*
11977 * Generic structure...
11978 */
11979 -struct mtrr_ops generic_mtrr_ops = {
11980 +const struct mtrr_ops generic_mtrr_ops = {
11981 .use_intel_if = 1,
11982 .set_all = generic_set_all,
11983 .get = generic_get_mtrr,
11984 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c
11985 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
11986 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
11987 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
11988 u64 size_or_mask, size_and_mask;
11989 static bool mtrr_aps_delayed_init;
11990
11991 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11992 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11993
11994 -struct mtrr_ops *mtrr_if;
11995 +const struct mtrr_ops *mtrr_if;
11996
11997 static void set_mtrr(unsigned int reg, unsigned long base,
11998 unsigned long size, mtrr_type type);
11999
12000 -void set_mtrr_ops(struct mtrr_ops *ops)
12001 +void set_mtrr_ops(const struct mtrr_ops *ops)
12002 {
12003 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12004 mtrr_ops[ops->vendor] = ops;
12005 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h
12006 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12007 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
12008 @@ -12,19 +12,19 @@
12009 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
12010
12011 struct mtrr_ops {
12012 - u32 vendor;
12013 - u32 use_intel_if;
12014 - void (*set)(unsigned int reg, unsigned long base,
12015 + const u32 vendor;
12016 + const u32 use_intel_if;
12017 + void (* const set)(unsigned int reg, unsigned long base,
12018 unsigned long size, mtrr_type type);
12019 - void (*set_all)(void);
12020 + void (* const set_all)(void);
12021
12022 - void (*get)(unsigned int reg, unsigned long *base,
12023 + void (* const get)(unsigned int reg, unsigned long *base,
12024 unsigned long *size, mtrr_type *type);
12025 - int (*get_free_region)(unsigned long base, unsigned long size,
12026 + int (* const get_free_region)(unsigned long base, unsigned long size,
12027 int replace_reg);
12028 - int (*validate_add_page)(unsigned long base, unsigned long size,
12029 + int (* const validate_add_page)(unsigned long base, unsigned long size,
12030 unsigned int type);
12031 - int (*have_wrcomb)(void);
12032 + int (* const have_wrcomb)(void);
12033 };
12034
12035 extern int generic_get_free_region(unsigned long base, unsigned long size,
12036 @@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
12037 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12038 unsigned int type);
12039
12040 -extern struct mtrr_ops generic_mtrr_ops;
12041 +extern const struct mtrr_ops generic_mtrr_ops;
12042
12043 extern int positive_have_wrcomb(void);
12044
12045 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12046 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12047 void get_mtrr_state(void);
12048
12049 -extern void set_mtrr_ops(struct mtrr_ops *ops);
12050 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
12051
12052 extern u64 size_or_mask, size_and_mask;
12053 -extern struct mtrr_ops *mtrr_if;
12054 +extern const struct mtrr_ops *mtrr_if;
12055
12056 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12057 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
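Constifying struct mtrr_ops above (like the sysfs_ops and dma_map_ops instances earlier) follows the pattern below; the names are invented for the sketch.

	/* Sketch only: declaring both the function-pointer members and the
	 * table itself const lets the table live in .rodata, so the pointers
	 * cannot be overwritten at run time. */
	struct demo_ops {
		int (* const probe)(void);
		void (* const remove)(void);
	};

	static int demo_probe(void) { return 0; }
	static void demo_remove(void) { }

	static const struct demo_ops demo_ops = {
		.probe	= demo_probe,
		.remove	= demo_remove,
	};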
12058 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c
12059 --- linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12060 +++ linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12061 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12062
12063 /* Interface defining a CPU specific perfctr watchdog */
12064 struct wd_ops {
12065 - int (*reserve)(void);
12066 - void (*unreserve)(void);
12067 - int (*setup)(unsigned nmi_hz);
12068 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12069 - void (*stop)(void);
12070 + int (* const reserve)(void);
12071 + void (* const unreserve)(void);
12072 + int (* const setup)(unsigned nmi_hz);
12073 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12074 + void (* const stop)(void);
12075 unsigned perfctr;
12076 unsigned evntsel;
12077 u64 checkbit;
12078 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12079 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12080 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12081
12082 +/* cannot be const */
12083 static struct wd_ops intel_arch_wd_ops;
12084
12085 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12086 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12087 return 1;
12088 }
12089
12090 +/* cannot be const */
12091 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12092 .reserve = single_msr_reserve,
12093 .unreserve = single_msr_unreserve,
12094 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c
12095 --- linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12096 +++ linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12097 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12098 * count to the generic event atomically:
12099 */
12100 again:
12101 - prev_raw_count = atomic64_read(&hwc->prev_count);
12102 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12103 rdmsrl(hwc->event_base + idx, new_raw_count);
12104
12105 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12106 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12107 new_raw_count) != prev_raw_count)
12108 goto again;
12109
12110 @@ -741,7 +741,7 @@ again:
12111 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12112 delta >>= shift;
12113
12114 - atomic64_add(delta, &event->count);
12115 + atomic64_add_unchecked(delta, &event->count);
12116 atomic64_sub(delta, &hwc->period_left);
12117
12118 return new_raw_count;
12119 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12120 * The hw event starts counting from this event offset,
12121 * mark it to be able to extra future deltas:
12122 */
12123 - atomic64_set(&hwc->prev_count, (u64)-left);
12124 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12125
12126 err = checking_wrmsrl(hwc->event_base + idx,
12127 (u64)(-left) & x86_pmu.event_mask);
12128 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12129 break;
12130
12131 callchain_store(entry, frame.return_address);
12132 - fp = frame.next_frame;
12133 + fp = (__force const void __user *)frame.next_frame;
12134 }
12135 }
12136
12137 diff -urNp linux-2.6.32.42/arch/x86/kernel/crash.c linux-2.6.32.42/arch/x86/kernel/crash.c
12138 --- linux-2.6.32.42/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12139 +++ linux-2.6.32.42/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12140 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12141 regs = args->regs;
12142
12143 #ifdef CONFIG_X86_32
12144 - if (!user_mode_vm(regs)) {
12145 + if (!user_mode(regs)) {
12146 crash_fixup_ss_esp(&fixed_regs, regs);
12147 regs = &fixed_regs;
12148 }
12149 diff -urNp linux-2.6.32.42/arch/x86/kernel/doublefault_32.c linux-2.6.32.42/arch/x86/kernel/doublefault_32.c
12150 --- linux-2.6.32.42/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12151 +++ linux-2.6.32.42/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12152 @@ -11,7 +11,7 @@
12153
12154 #define DOUBLEFAULT_STACKSIZE (1024)
12155 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12156 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12157 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12158
12159 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12160
12161 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12162 unsigned long gdt, tss;
12163
12164 store_gdt(&gdt_desc);
12165 - gdt = gdt_desc.address;
12166 + gdt = (unsigned long)gdt_desc.address;
12167
12168 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12169
12170 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12171 /* 0x2 bit is always set */
12172 .flags = X86_EFLAGS_SF | 0x2,
12173 .sp = STACK_START,
12174 - .es = __USER_DS,
12175 + .es = __KERNEL_DS,
12176 .cs = __KERNEL_CS,
12177 .ss = __KERNEL_DS,
12178 - .ds = __USER_DS,
12179 + .ds = __KERNEL_DS,
12180 .fs = __KERNEL_PERCPU,
12181
12182 .__cr3 = __pa_nodebug(swapper_pg_dir),
12183 diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c
12184 --- linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12185 +++ linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12186 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12187 #endif
12188
12189 for (;;) {
12190 - struct thread_info *context;
12191 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12192 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12193
12194 - context = (struct thread_info *)
12195 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12196 - bp = print_context_stack(context, stack, bp, ops,
12197 - data, NULL, &graph);
12198 -
12199 - stack = (unsigned long *)context->previous_esp;
12200 - if (!stack)
12201 + if (stack_start == task_stack_page(task))
12202 break;
12203 + stack = *(unsigned long **)stack_start;
12204 if (ops->stack(data, "IRQ") < 0)
12205 break;
12206 touch_nmi_watchdog();
12207 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12208 * When in-kernel, we also print out the stack and code at the
12209 * time of the fault..
12210 */
12211 - if (!user_mode_vm(regs)) {
12212 + if (!user_mode(regs)) {
12213 unsigned int code_prologue = code_bytes * 43 / 64;
12214 unsigned int code_len = code_bytes;
12215 unsigned char c;
12216 u8 *ip;
12217 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12218
12219 printk(KERN_EMERG "Stack:\n");
12220 show_stack_log_lvl(NULL, regs, &regs->sp,
12221 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12222
12223 printk(KERN_EMERG "Code: ");
12224
12225 - ip = (u8 *)regs->ip - code_prologue;
12226 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12227 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12228 /* try starting at IP */
12229 - ip = (u8 *)regs->ip;
12230 + ip = (u8 *)regs->ip + cs_base;
12231 code_len = code_len - code_prologue + 1;
12232 }
12233 for (i = 0; i < code_len; i++, ip++) {
12234 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12235 printk(" Bad EIP value.");
12236 break;
12237 }
12238 - if (ip == (u8 *)regs->ip)
12239 + if (ip == (u8 *)regs->ip + cs_base)
12240 printk("<%02x> ", c);
12241 else
12242 printk("%02x ", c);
12243 @@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12244 {
12245 unsigned short ud2;
12246
12247 + ip = ktla_ktva(ip);
12248 if (ip < PAGE_OFFSET)
12249 return 0;
12250 if (probe_kernel_address((unsigned short *)ip, ud2))
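
The rewritten 32-bit walker above no longer follows thread_info->previous_esp; it derives the base of whatever THREAD_SIZE-aligned stack the current pointer lives in and stops once that base is the task's own stack page. A minimal C sketch of those two steps, assuming THREAD_SIZE is a power of two (the helper names are illustrative, not from the patch):

#include <linux/sched.h>

/* Base of the THREAD_SIZE-aligned stack containing 'sp'. */
static inline void *stack_base_of(const void *sp)
{
	return (void *)((unsigned long)sp & ~(THREAD_SIZE - 1));
}

/* Termination test used by the loop above: once the base equals the task's
 * own stack page, there is no further stack to link back to. */
static inline int reached_process_stack(struct task_struct *task, const void *sp)
{
	return stack_base_of(sp) == task_stack_page(task);
}

When the test fails, the walker treats the first word at that base as the saved pointer to the previous stack, mirroring "stack = *(unsigned long **)stack_start;" in the hunk above.
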
12251 diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c
12252 --- linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12253 +++ linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12254 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12255 unsigned long *irq_stack_end =
12256 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12257 unsigned used = 0;
12258 - struct thread_info *tinfo;
12259 int graph = 0;
12260 + void *stack_start;
12261
12262 if (!task)
12263 task = current;
12264 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12265 * current stack address. If the stacks consist of nested
12266 * exceptions
12267 */
12268 - tinfo = task_thread_info(task);
12269 for (;;) {
12270 char *id;
12271 unsigned long *estack_end;
12272 +
12273 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12274 &used, &id);
12275
12276 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12277 if (ops->stack(data, id) < 0)
12278 break;
12279
12280 - bp = print_context_stack(tinfo, stack, bp, ops,
12281 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12282 data, estack_end, &graph);
12283 ops->stack(data, "<EOE>");
12284 /*
12285 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12286 if (stack >= irq_stack && stack < irq_stack_end) {
12287 if (ops->stack(data, "IRQ") < 0)
12288 break;
12289 - bp = print_context_stack(tinfo, stack, bp,
12290 + bp = print_context_stack(task, irq_stack, stack, bp,
12291 ops, data, irq_stack_end, &graph);
12292 /*
12293 * We link to the next stack (which would be
12294 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12295 /*
12296 * This handles the process stack:
12297 */
12298 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12299 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12300 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12301 put_cpu();
12302 }
12303 EXPORT_SYMBOL(dump_trace);
12304 diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack.c linux-2.6.32.42/arch/x86/kernel/dumpstack.c
12305 --- linux-2.6.32.42/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12306 +++ linux-2.6.32.42/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12307 @@ -2,6 +2,9 @@
12308 * Copyright (C) 1991, 1992 Linus Torvalds
12309 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12310 */
12311 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12312 +#define __INCLUDED_BY_HIDESYM 1
12313 +#endif
12314 #include <linux/kallsyms.h>
12315 #include <linux/kprobes.h>
12316 #include <linux/uaccess.h>
12317 @@ -28,7 +31,7 @@ static int die_counter;
12318
12319 void printk_address(unsigned long address, int reliable)
12320 {
12321 - printk(" [<%p>] %s%pS\n", (void *) address,
12322 + printk(" [<%p>] %s%pA\n", (void *) address,
12323 reliable ? "" : "? ", (void *) address);
12324 }
12325
12326 @@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12327 static void
12328 print_ftrace_graph_addr(unsigned long addr, void *data,
12329 const struct stacktrace_ops *ops,
12330 - struct thread_info *tinfo, int *graph)
12331 + struct task_struct *task, int *graph)
12332 {
12333 - struct task_struct *task = tinfo->task;
12334 unsigned long ret_addr;
12335 int index = task->curr_ret_stack;
12336
12337 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12338 static inline void
12339 print_ftrace_graph_addr(unsigned long addr, void *data,
12340 const struct stacktrace_ops *ops,
12341 - struct thread_info *tinfo, int *graph)
12342 + struct task_struct *task, int *graph)
12343 { }
12344 #endif
12345
12346 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12347 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12348 */
12349
12350 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12351 - void *p, unsigned int size, void *end)
12352 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12353 {
12354 - void *t = tinfo;
12355 if (end) {
12356 if (p < end && p >= (end-THREAD_SIZE))
12357 return 1;
12358 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12359 }
12360
12361 unsigned long
12362 -print_context_stack(struct thread_info *tinfo,
12363 +print_context_stack(struct task_struct *task, void *stack_start,
12364 unsigned long *stack, unsigned long bp,
12365 const struct stacktrace_ops *ops, void *data,
12366 unsigned long *end, int *graph)
12367 {
12368 struct stack_frame *frame = (struct stack_frame *)bp;
12369
12370 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12371 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12372 unsigned long addr;
12373
12374 addr = *stack;
12375 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12376 } else {
12377 ops->address(data, addr, 0);
12378 }
12379 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12380 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12381 }
12382 stack++;
12383 }
12384 @@ -180,7 +180,7 @@ void dump_stack(void)
12385 #endif
12386
12387 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12388 - current->pid, current->comm, print_tainted(),
12389 + task_pid_nr(current), current->comm, print_tainted(),
12390 init_utsname()->release,
12391 (int)strcspn(init_utsname()->version, " "),
12392 init_utsname()->version);
12393 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12394 return flags;
12395 }
12396
12397 +extern void gr_handle_kernel_exploit(void);
12398 +
12399 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12400 {
12401 if (regs && kexec_should_crash(current))
12402 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12403 panic("Fatal exception in interrupt");
12404 if (panic_on_oops)
12405 panic("Fatal exception");
12406 - do_exit(signr);
12407 +
12408 + gr_handle_kernel_exploit();
12409 +
12410 + do_group_exit(signr);
12411 }
12412
12413 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12414 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12415 unsigned long flags = oops_begin();
12416 int sig = SIGSEGV;
12417
12418 - if (!user_mode_vm(regs))
12419 + if (!user_mode(regs))
12420 report_bug(regs->ip, regs);
12421
12422 if (__die(str, regs, err))
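
With thread_info gone from the unwinder interface, the bounds check in dumpstack.c works purely on the stack base that callers now pass in. A hedged C restatement of what the reworked valid_stack_ptr() above checks (the wrapper name here is made up):

#include <linux/sched.h>

/* When an explicit 'end' is known (IRQ/exception stacks) the candidate must
 * lie in the THREAD_SIZE window ending there; otherwise it must lie inside
 * the THREAD_SIZE window that starts at the supplied stack base. */
static inline int ptr_in_stack_window(void *stack_start, void *p,
				      unsigned int size, void *end)
{
	if (end)
		return p < end && p >= end - THREAD_SIZE;
	return p > stack_start && p < stack_start + THREAD_SIZE - size;
}

The remaining hunks in this file are independent hardening bits: %pS becomes the grsecurity %pA printk format (used with GRKERNSEC_HIDESYM), oops_end() calls gr_handle_kernel_exploit() and takes down the whole thread group via do_group_exit(), and dump_stack() prints task_pid_nr(current) instead of the raw pid.
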
12423 diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack.h linux-2.6.32.42/arch/x86/kernel/dumpstack.h
12424 --- linux-2.6.32.42/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
12425 +++ linux-2.6.32.42/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
12426 @@ -15,7 +15,7 @@
12427 #endif
12428
12429 extern unsigned long
12430 -print_context_stack(struct thread_info *tinfo,
12431 +print_context_stack(struct task_struct *task, void *stack_start,
12432 unsigned long *stack, unsigned long bp,
12433 const struct stacktrace_ops *ops, void *data,
12434 unsigned long *end, int *graph);
12435 diff -urNp linux-2.6.32.42/arch/x86/kernel/e820.c linux-2.6.32.42/arch/x86/kernel/e820.c
12436 --- linux-2.6.32.42/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
12437 +++ linux-2.6.32.42/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
12438 @@ -733,7 +733,7 @@ struct early_res {
12439 };
12440 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
12441 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
12442 - {}
12443 + { 0, 0, {0}, 0 }
12444 };
12445
12446 static int __init find_overlapped_early(u64 start, u64 end)
12447 diff -urNp linux-2.6.32.42/arch/x86/kernel/early_printk.c linux-2.6.32.42/arch/x86/kernel/early_printk.c
12448 --- linux-2.6.32.42/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
12449 +++ linux-2.6.32.42/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
12450 @@ -7,6 +7,7 @@
12451 #include <linux/pci_regs.h>
12452 #include <linux/pci_ids.h>
12453 #include <linux/errno.h>
12454 +#include <linux/sched.h>
12455 #include <asm/io.h>
12456 #include <asm/processor.h>
12457 #include <asm/fcntl.h>
12458 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
12459 int n;
12460 va_list ap;
12461
12462 + pax_track_stack();
12463 +
12464 va_start(ap, fmt);
12465 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12466 early_console->write(early_console, buf, n);
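
pax_track_stack() is the PAX_MEMORY_STACKLEAK bookkeeping call this patch sprinkles into functions with large stack frames (early_printk formats into a sizeable on-stack buffer); it records how deep the stack has grown so the erase pass on kernel exit knows how much to clear. The usage pattern, with an illustrative function and buffer size:

#include <linux/sched.h>
#include <linux/string.h>

/* Illustrative only: any function with a big on-stack buffer gets the same
 * treatment - call pax_track_stack() (a helper added by this patch) before
 * the buffer is used, so the lowest stack pointer reached is recorded. */
static void large_frame_example(void)
{
	char buf[512];

	pax_track_stack();
	memset(buf, 0, sizeof(buf));
}
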
12467 diff -urNp linux-2.6.32.42/arch/x86/kernel/efi_32.c linux-2.6.32.42/arch/x86/kernel/efi_32.c
12468 --- linux-2.6.32.42/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
12469 +++ linux-2.6.32.42/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
12470 @@ -38,70 +38,38 @@
12471 */
12472
12473 static unsigned long efi_rt_eflags;
12474 -static pgd_t efi_bak_pg_dir_pointer[2];
12475 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
12476
12477 -void efi_call_phys_prelog(void)
12478 +void __init efi_call_phys_prelog(void)
12479 {
12480 - unsigned long cr4;
12481 - unsigned long temp;
12482 struct desc_ptr gdt_descr;
12483
12484 local_irq_save(efi_rt_eflags);
12485
12486 - /*
12487 - * If I don't have PAE, I should just duplicate two entries in page
12488 - * directory. If I have PAE, I just need to duplicate one entry in
12489 - * page directory.
12490 - */
12491 - cr4 = read_cr4_safe();
12492
12493 - if (cr4 & X86_CR4_PAE) {
12494 - efi_bak_pg_dir_pointer[0].pgd =
12495 - swapper_pg_dir[pgd_index(0)].pgd;
12496 - swapper_pg_dir[0].pgd =
12497 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12498 - } else {
12499 - efi_bak_pg_dir_pointer[0].pgd =
12500 - swapper_pg_dir[pgd_index(0)].pgd;
12501 - efi_bak_pg_dir_pointer[1].pgd =
12502 - swapper_pg_dir[pgd_index(0x400000)].pgd;
12503 - swapper_pg_dir[pgd_index(0)].pgd =
12504 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12505 - temp = PAGE_OFFSET + 0x400000;
12506 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12507 - swapper_pg_dir[pgd_index(temp)].pgd;
12508 - }
12509 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
12510 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
12511 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
12512
12513 /*
12514 * After the lock is released, the original page table is restored.
12515 */
12516 __flush_tlb_all();
12517
12518 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
12519 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
12520 gdt_descr.size = GDT_SIZE - 1;
12521 load_gdt(&gdt_descr);
12522 }
12523
12524 -void efi_call_phys_epilog(void)
12525 +void __init efi_call_phys_epilog(void)
12526 {
12527 - unsigned long cr4;
12528 struct desc_ptr gdt_descr;
12529
12530 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
12531 + gdt_descr.address = get_cpu_gdt_table(0);
12532 gdt_descr.size = GDT_SIZE - 1;
12533 load_gdt(&gdt_descr);
12534
12535 - cr4 = read_cr4_safe();
12536 -
12537 - if (cr4 & X86_CR4_PAE) {
12538 - swapper_pg_dir[pgd_index(0)].pgd =
12539 - efi_bak_pg_dir_pointer[0].pgd;
12540 - } else {
12541 - swapper_pg_dir[pgd_index(0)].pgd =
12542 - efi_bak_pg_dir_pointer[0].pgd;
12543 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12544 - efi_bak_pg_dir_pointer[1].pgd;
12545 - }
12546 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
12547
12548 /*
12549 * After the lock is released, the original page table is restored.
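
Instead of hand-saving one or two page-directory slots depending on PAE, the prolog/epilog above now snapshot and restore the entire kernel half of swapper_pg_dir with clone_pgd_range(), which in this kernel is essentially a sized memcpy over pgd entries - roughly:

#include <linux/string.h>
#include <asm/pgtable.h>

/* Rough shape of clone_pgd_range() as used above (see asm/pgtable.h):
 * copy 'count' page-directory entries from src to dst. */
static inline void clone_pgd_range_sketch(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

The prolog backs up the whole kernel range, then mirrors the kernel mappings down into the low slots so the addresses used around the physical-mode EFI call stay mapped; the epilog restores the saved entries wholesale, with no PAE/non-PAE special casing left.
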
12550 diff -urNp linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S
12551 --- linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
12552 +++ linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
12553 @@ -6,6 +6,7 @@
12554 */
12555
12556 #include <linux/linkage.h>
12557 +#include <linux/init.h>
12558 #include <asm/page_types.h>
12559
12560 /*
12561 @@ -20,7 +21,7 @@
12562 * service functions will comply with gcc calling convention, too.
12563 */
12564
12565 -.text
12566 +__INIT
12567 ENTRY(efi_call_phys)
12568 /*
12569 * 0. The function can only be called in Linux kernel. So CS has been
12570 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
12571 * The mapping of lower virtual memory has been created in prelog and
12572 * epilog.
12573 */
12574 - movl $1f, %edx
12575 - subl $__PAGE_OFFSET, %edx
12576 - jmp *%edx
12577 + jmp 1f-__PAGE_OFFSET
12578 1:
12579
12580 /*
12581 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
12582 * parameter 2, ..., param n. To make things easy, we save the return
12583 * address of efi_call_phys in a global variable.
12584 */
12585 - popl %edx
12586 - movl %edx, saved_return_addr
12587 - /* get the function pointer into ECX*/
12588 - popl %ecx
12589 - movl %ecx, efi_rt_function_ptr
12590 - movl $2f, %edx
12591 - subl $__PAGE_OFFSET, %edx
12592 - pushl %edx
12593 + popl (saved_return_addr)
12594 + popl (efi_rt_function_ptr)
12595
12596 /*
12597 * 3. Clear PG bit in %CR0.
12598 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
12599 /*
12600 * 5. Call the physical function.
12601 */
12602 - jmp *%ecx
12603 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
12604
12605 -2:
12606 /*
12607 * 6. After EFI runtime service returns, control will return to
12608 * following instruction. We'd better readjust stack pointer first.
12609 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
12610 movl %cr0, %edx
12611 orl $0x80000000, %edx
12612 movl %edx, %cr0
12613 - jmp 1f
12614 -1:
12615 +
12616 /*
12617 * 8. Now restore the virtual mode from flat mode by
12618 * adding EIP with PAGE_OFFSET.
12619 */
12620 - movl $1f, %edx
12621 - jmp *%edx
12622 + jmp 1f+__PAGE_OFFSET
12623 1:
12624
12625 /*
12626 * 9. Balance the stack. And because EAX contain the return value,
12627 * we'd better not clobber it.
12628 */
12629 - leal efi_rt_function_ptr, %edx
12630 - movl (%edx), %ecx
12631 - pushl %ecx
12632 + pushl (efi_rt_function_ptr)
12633
12634 /*
12635 - * 10. Push the saved return address onto the stack and return.
12636 + * 10. Return to the saved return address.
12637 */
12638 - leal saved_return_addr, %edx
12639 - movl (%edx), %ecx
12640 - pushl %ecx
12641 - ret
12642 + jmpl *(saved_return_addr)
12643 ENDPROC(efi_call_phys)
12644 .previous
12645
12646 -.data
12647 +__INITDATA
12648 saved_return_addr:
12649 .long 0
12650 efi_rt_function_ptr:
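
Besides simplifying the register juggling (the return address and function pointer are now popped straight into memory), the stub and its two data words move from .text/.data into __INIT/__INITDATA, matching the __init markings added to efi_call_phys_prelog()/epilog() in the previous file: the physical-mode EFI call only happens during boot, so none of it needs to stay resident. The same annotation in C terms (the names here are illustrative, not from the patch):

#include <linux/init.h>

/* __initdata objects and __init functions live in sections that are freed
 * once boot completes - the assembly above achieves the same by switching
 * from .text/.data to __INIT/__INITDATA. */
static unsigned long efi_phys_scratch __initdata;

static int __init efi_phys_call_note(void)
{
	efi_phys_scratch = 0;
	return 0;
}
early_initcall(efi_phys_call_note);
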
12651 diff -urNp linux-2.6.32.42/arch/x86/kernel/entry_32.S linux-2.6.32.42/arch/x86/kernel/entry_32.S
12652 --- linux-2.6.32.42/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
12653 +++ linux-2.6.32.42/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
12654 @@ -185,13 +185,146 @@
12655 /*CFI_REL_OFFSET gs, PT_GS*/
12656 .endm
12657 .macro SET_KERNEL_GS reg
12658 +
12659 +#ifdef CONFIG_CC_STACKPROTECTOR
12660 movl $(__KERNEL_STACK_CANARY), \reg
12661 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12662 + movl $(__USER_DS), \reg
12663 +#else
12664 + xorl \reg, \reg
12665 +#endif
12666 +
12667 movl \reg, %gs
12668 .endm
12669
12670 #endif /* CONFIG_X86_32_LAZY_GS */
12671
12672 -.macro SAVE_ALL
12673 +.macro pax_enter_kernel
12674 +#ifdef CONFIG_PAX_KERNEXEC
12675 + call pax_enter_kernel
12676 +#endif
12677 +.endm
12678 +
12679 +.macro pax_exit_kernel
12680 +#ifdef CONFIG_PAX_KERNEXEC
12681 + call pax_exit_kernel
12682 +#endif
12683 +.endm
12684 +
12685 +#ifdef CONFIG_PAX_KERNEXEC
12686 +ENTRY(pax_enter_kernel)
12687 +#ifdef CONFIG_PARAVIRT
12688 + pushl %eax
12689 + pushl %ecx
12690 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12691 + mov %eax, %esi
12692 +#else
12693 + mov %cr0, %esi
12694 +#endif
12695 + bts $16, %esi
12696 + jnc 1f
12697 + mov %cs, %esi
12698 + cmp $__KERNEL_CS, %esi
12699 + jz 3f
12700 + ljmp $__KERNEL_CS, $3f
12701 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12702 +2:
12703 +#ifdef CONFIG_PARAVIRT
12704 + mov %esi, %eax
12705 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12706 +#else
12707 + mov %esi, %cr0
12708 +#endif
12709 +3:
12710 +#ifdef CONFIG_PARAVIRT
12711 + popl %ecx
12712 + popl %eax
12713 +#endif
12714 + ret
12715 +ENDPROC(pax_enter_kernel)
12716 +
12717 +ENTRY(pax_exit_kernel)
12718 +#ifdef CONFIG_PARAVIRT
12719 + pushl %eax
12720 + pushl %ecx
12721 +#endif
12722 + mov %cs, %esi
12723 + cmp $__KERNEXEC_KERNEL_CS, %esi
12724 + jnz 2f
12725 +#ifdef CONFIG_PARAVIRT
12726 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12727 + mov %eax, %esi
12728 +#else
12729 + mov %cr0, %esi
12730 +#endif
12731 + btr $16, %esi
12732 + ljmp $__KERNEL_CS, $1f
12733 +1:
12734 +#ifdef CONFIG_PARAVIRT
12735 + mov %esi, %eax
12736 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12737 +#else
12738 + mov %esi, %cr0
12739 +#endif
12740 +2:
12741 +#ifdef CONFIG_PARAVIRT
12742 + popl %ecx
12743 + popl %eax
12744 +#endif
12745 + ret
12746 +ENDPROC(pax_exit_kernel)
12747 +#endif
12748 +
12749 +.macro pax_erase_kstack
12750 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12751 + call pax_erase_kstack
12752 +#endif
12753 +.endm
12754 +
12755 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12756 +/*
12757 + * ebp: thread_info
12758 + * ecx, edx: can be clobbered
12759 + */
12760 +ENTRY(pax_erase_kstack)
12761 + pushl %edi
12762 + pushl %eax
12763 +
12764 + mov TI_lowest_stack(%ebp), %edi
12765 + mov $-0xBEEF, %eax
12766 + std
12767 +
12768 +1: mov %edi, %ecx
12769 + and $THREAD_SIZE_asm - 1, %ecx
12770 + shr $2, %ecx
12771 + repne scasl
12772 + jecxz 2f
12773 +
12774 + cmp $2*16, %ecx
12775 + jc 2f
12776 +
12777 + mov $2*16, %ecx
12778 + repe scasl
12779 + jecxz 2f
12780 + jne 1b
12781 +
12782 +2: cld
12783 + mov %esp, %ecx
12784 + sub %edi, %ecx
12785 + shr $2, %ecx
12786 + rep stosl
12787 +
12788 + mov TI_task_thread_sp0(%ebp), %edi
12789 + sub $128, %edi
12790 + mov %edi, TI_lowest_stack(%ebp)
12791 +
12792 + popl %eax
12793 + popl %edi
12794 + ret
12795 +ENDPROC(pax_erase_kstack)
12796 +#endif
12797 +
12798 +.macro __SAVE_ALL _DS
12799 cld
12800 PUSH_GS
12801 pushl %fs
12802 @@ -224,7 +357,7 @@
12803 pushl %ebx
12804 CFI_ADJUST_CFA_OFFSET 4
12805 CFI_REL_OFFSET ebx, 0
12806 - movl $(__USER_DS), %edx
12807 + movl $\_DS, %edx
12808 movl %edx, %ds
12809 movl %edx, %es
12810 movl $(__KERNEL_PERCPU), %edx
12811 @@ -232,6 +365,15 @@
12812 SET_KERNEL_GS %edx
12813 .endm
12814
12815 +.macro SAVE_ALL
12816 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12817 + __SAVE_ALL __KERNEL_DS
12818 + pax_enter_kernel
12819 +#else
12820 + __SAVE_ALL __USER_DS
12821 +#endif
12822 +.endm
12823 +
12824 .macro RESTORE_INT_REGS
12825 popl %ebx
12826 CFI_ADJUST_CFA_OFFSET -4
12827 @@ -352,7 +494,15 @@ check_userspace:
12828 movb PT_CS(%esp), %al
12829 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12830 cmpl $USER_RPL, %eax
12831 +
12832 +#ifdef CONFIG_PAX_KERNEXEC
12833 + jae resume_userspace
12834 +
12835 + PAX_EXIT_KERNEL
12836 + jmp resume_kernel
12837 +#else
12838 jb resume_kernel # not returning to v8086 or userspace
12839 +#endif
12840
12841 ENTRY(resume_userspace)
12842 LOCKDEP_SYS_EXIT
12843 @@ -364,7 +514,7 @@ ENTRY(resume_userspace)
12844 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12845 # int/exception return?
12846 jne work_pending
12847 - jmp restore_all
12848 + jmp restore_all_pax
12849 END(ret_from_exception)
12850
12851 #ifdef CONFIG_PREEMPT
12852 @@ -414,25 +564,36 @@ sysenter_past_esp:
12853 /*CFI_REL_OFFSET cs, 0*/
12854 /*
12855 * Push current_thread_info()->sysenter_return to the stack.
12856 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12857 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
12858 */
12859 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
12860 + pushl $0
12861 CFI_ADJUST_CFA_OFFSET 4
12862 CFI_REL_OFFSET eip, 0
12863
12864 pushl %eax
12865 CFI_ADJUST_CFA_OFFSET 4
12866 SAVE_ALL
12867 + GET_THREAD_INFO(%ebp)
12868 + movl TI_sysenter_return(%ebp),%ebp
12869 + movl %ebp,PT_EIP(%esp)
12870 ENABLE_INTERRUPTS(CLBR_NONE)
12871
12872 /*
12873 * Load the potential sixth argument from user stack.
12874 * Careful about security.
12875 */
12876 + movl PT_OLDESP(%esp),%ebp
12877 +
12878 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12879 + mov PT_OLDSS(%esp),%ds
12880 +1: movl %ds:(%ebp),%ebp
12881 + push %ss
12882 + pop %ds
12883 +#else
12884 cmpl $__PAGE_OFFSET-3,%ebp
12885 jae syscall_fault
12886 1: movl (%ebp),%ebp
12887 +#endif
12888 +
12889 movl %ebp,PT_EBP(%esp)
12890 .section __ex_table,"a"
12891 .align 4
12892 @@ -455,12 +616,23 @@ sysenter_do_call:
12893 testl $_TIF_ALLWORK_MASK, %ecx
12894 jne sysexit_audit
12895 sysenter_exit:
12896 +
12897 +#ifdef CONFIG_PAX_RANDKSTACK
12898 + pushl_cfi %eax
12899 + call pax_randomize_kstack
12900 + popl_cfi %eax
12901 +#endif
12902 +
12903 + pax_erase_kstack
12904 +
12905 /* if something modifies registers it must also disable sysexit */
12906 movl PT_EIP(%esp), %edx
12907 movl PT_OLDESP(%esp), %ecx
12908 xorl %ebp,%ebp
12909 TRACE_IRQS_ON
12910 1: mov PT_FS(%esp), %fs
12911 +2: mov PT_DS(%esp), %ds
12912 +3: mov PT_ES(%esp), %es
12913 PTGS_TO_GS
12914 ENABLE_INTERRUPTS_SYSEXIT
12915
12916 @@ -477,6 +649,9 @@ sysenter_audit:
12917 movl %eax,%edx /* 2nd arg: syscall number */
12918 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12919 call audit_syscall_entry
12920 +
12921 + pax_erase_kstack
12922 +
12923 pushl %ebx
12924 CFI_ADJUST_CFA_OFFSET 4
12925 movl PT_EAX(%esp),%eax /* reload syscall number */
12926 @@ -504,11 +679,17 @@ sysexit_audit:
12927
12928 CFI_ENDPROC
12929 .pushsection .fixup,"ax"
12930 -2: movl $0,PT_FS(%esp)
12931 +4: movl $0,PT_FS(%esp)
12932 + jmp 1b
12933 +5: movl $0,PT_DS(%esp)
12934 + jmp 1b
12935 +6: movl $0,PT_ES(%esp)
12936 jmp 1b
12937 .section __ex_table,"a"
12938 .align 4
12939 - .long 1b,2b
12940 + .long 1b,4b
12941 + .long 2b,5b
12942 + .long 3b,6b
12943 .popsection
12944 PTGS_TO_GS_EX
12945 ENDPROC(ia32_sysenter_target)
12946 @@ -538,6 +719,14 @@ syscall_exit:
12947 testl $_TIF_ALLWORK_MASK, %ecx # current->work
12948 jne syscall_exit_work
12949
12950 +restore_all_pax:
12951 +
12952 +#ifdef CONFIG_PAX_RANDKSTACK
12953 + call pax_randomize_kstack
12954 +#endif
12955 +
12956 + pax_erase_kstack
12957 +
12958 restore_all:
12959 TRACE_IRQS_IRET
12960 restore_all_notrace:
12961 @@ -602,7 +791,13 @@ ldt_ss:
12962 mov PT_OLDESP(%esp), %eax /* load userspace esp */
12963 mov %dx, %ax /* eax: new kernel esp */
12964 sub %eax, %edx /* offset (low word is 0) */
12965 - PER_CPU(gdt_page, %ebx)
12966 +#ifdef CONFIG_SMP
12967 + movl PER_CPU_VAR(cpu_number), %ebx
12968 + shll $PAGE_SHIFT_asm, %ebx
12969 + addl $cpu_gdt_table, %ebx
12970 +#else
12971 + movl $cpu_gdt_table, %ebx
12972 +#endif
12973 shr $16, %edx
12974 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
12975 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
12976 @@ -636,31 +831,25 @@ work_resched:
12977 movl TI_flags(%ebp), %ecx
12978 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
12979 # than syscall tracing?
12980 - jz restore_all
12981 + jz restore_all_pax
12982 testb $_TIF_NEED_RESCHED, %cl
12983 jnz work_resched
12984
12985 work_notifysig: # deal with pending signals and
12986 # notify-resume requests
12987 + movl %esp, %eax
12988 #ifdef CONFIG_VM86
12989 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
12990 - movl %esp, %eax
12991 - jne work_notifysig_v86 # returning to kernel-space or
12992 + jz 1f # returning to kernel-space or
12993 # vm86-space
12994 - xorl %edx, %edx
12995 - call do_notify_resume
12996 - jmp resume_userspace_sig
12997
12998 - ALIGN
12999 -work_notifysig_v86:
13000 pushl %ecx # save ti_flags for do_notify_resume
13001 CFI_ADJUST_CFA_OFFSET 4
13002 call save_v86_state # %eax contains pt_regs pointer
13003 popl %ecx
13004 CFI_ADJUST_CFA_OFFSET -4
13005 movl %eax, %esp
13006 -#else
13007 - movl %esp, %eax
13008 +1:
13009 #endif
13010 xorl %edx, %edx
13011 call do_notify_resume
13012 @@ -673,6 +862,9 @@ syscall_trace_entry:
13013 movl $-ENOSYS,PT_EAX(%esp)
13014 movl %esp, %eax
13015 call syscall_trace_enter
13016 +
13017 + pax_erase_kstack
13018 +
13019 /* What it returned is what we'll actually use. */
13020 cmpl $(nr_syscalls), %eax
13021 jnae syscall_call
13022 @@ -695,6 +887,10 @@ END(syscall_exit_work)
13023
13024 RING0_INT_FRAME # can't unwind into user space anyway
13025 syscall_fault:
13026 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13027 + push %ss
13028 + pop %ds
13029 +#endif
13030 GET_THREAD_INFO(%ebp)
13031 movl $-EFAULT,PT_EAX(%esp)
13032 jmp resume_userspace
13033 @@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
13034 PTREGSCALL(vm86)
13035 PTREGSCALL(vm86old)
13036
13037 + ALIGN;
13038 +ENTRY(kernel_execve)
13039 + push %ebp
13040 + sub $PT_OLDSS+4,%esp
13041 + push %edi
13042 + push %ecx
13043 + push %eax
13044 + lea 3*4(%esp),%edi
13045 + mov $PT_OLDSS/4+1,%ecx
13046 + xorl %eax,%eax
13047 + rep stosl
13048 + pop %eax
13049 + pop %ecx
13050 + pop %edi
13051 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13052 + mov %eax,PT_EBX(%esp)
13053 + mov %edx,PT_ECX(%esp)
13054 + mov %ecx,PT_EDX(%esp)
13055 + mov %esp,%eax
13056 + call sys_execve
13057 + GET_THREAD_INFO(%ebp)
13058 + test %eax,%eax
13059 + jz syscall_exit
13060 + add $PT_OLDSS+4,%esp
13061 + pop %ebp
13062 + ret
13063 +
13064 .macro FIXUP_ESPFIX_STACK
13065 /*
13066 * Switch back for ESPFIX stack to the normal zerobased stack
13067 @@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
13068 * normal stack and adjusts ESP with the matching offset.
13069 */
13070 /* fixup the stack */
13071 - PER_CPU(gdt_page, %ebx)
13072 +#ifdef CONFIG_SMP
13073 + movl PER_CPU_VAR(cpu_number), %ebx
13074 + shll $PAGE_SHIFT_asm, %ebx
13075 + addl $cpu_gdt_table, %ebx
13076 +#else
13077 + movl $cpu_gdt_table, %ebx
13078 +#endif
13079 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13080 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13081 shl $16, %eax
13082 @@ -1198,7 +1427,6 @@ return_to_handler:
13083 ret
13084 #endif
13085
13086 -.section .rodata,"a"
13087 #include "syscall_table_32.S"
13088
13089 syscall_table_size=(.-sys_call_table)
13090 @@ -1255,9 +1483,12 @@ error_code:
13091 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13092 REG_TO_PTGS %ecx
13093 SET_KERNEL_GS %ecx
13094 - movl $(__USER_DS), %ecx
13095 + movl $(__KERNEL_DS), %ecx
13096 movl %ecx, %ds
13097 movl %ecx, %es
13098 +
13099 + pax_enter_kernel
13100 +
13101 TRACE_IRQS_OFF
13102 movl %esp,%eax # pt_regs pointer
13103 call *%edi
13104 @@ -1351,6 +1582,9 @@ nmi_stack_correct:
13105 xorl %edx,%edx # zero error code
13106 movl %esp,%eax # pt_regs pointer
13107 call do_nmi
13108 +
13109 + pax_exit_kernel
13110 +
13111 jmp restore_all_notrace
13112 CFI_ENDPROC
13113
13114 @@ -1391,6 +1625,9 @@ nmi_espfix_stack:
13115 FIXUP_ESPFIX_STACK # %eax == %esp
13116 xorl %edx,%edx # zero error code
13117 call do_nmi
13118 +
13119 + pax_exit_kernel
13120 +
13121 RESTORE_REGS
13122 lss 12+4(%esp), %esp # back to espfix stack
13123 CFI_ADJUST_CFA_OFFSET -24
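
Much of the new code above is the 32-bit PAX_MEMORY_STACKLEAK erase pass: pax_erase_kstack scans down from the lowest stack pointer recorded for this thread until it hits a long enough run of the poison value (meaning that part was already cleared), then overwrites everything from there up to the live stack pointer with the poison and re-arms the low-water mark near the top of the kernel stack. A hedged C rendition of that loop - a sketch, not the patch's code; the constants and the lowest_stack field follow the assembly, lowest_stack being a thread_info member this patch adds:

#include <linux/sched.h>

#define STACKLEAK_POISON	((unsigned long)-0xBEEF)
#define POISON_RUN		(2 * 16)	/* run length the 32-bit scan checks for */

static void erase_kstack_sketch(struct thread_info *ti, unsigned long *esp)
{
	unsigned long *p = (unsigned long *)ti->lowest_stack;
	unsigned long *base = (unsigned long *)((unsigned long)p & ~(THREAD_SIZE - 1));
	unsigned int run;

	/* scan downwards for a run of poison - stack that was already erased */
	while (p > base) {
		for (run = 0; p > base && *p == STACKLEAK_POISON && run < POISON_RUN; p--)
			run++;
		if (run >= POISON_RUN)
			break;
		if (p > base)
			p--;		/* skip the non-poison word and keep scanning */
	}

	/* wipe everything between there and the current stack pointer */
	while (p < esp)
		*p++ = STACKLEAK_POISON;

	/* re-arm: next entry starts tracking from just below the stack top */
	ti->lowest_stack = ti->task->thread.sp0 - 128;
}
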
13124 diff -urNp linux-2.6.32.42/arch/x86/kernel/entry_64.S linux-2.6.32.42/arch/x86/kernel/entry_64.S
13125 --- linux-2.6.32.42/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13126 +++ linux-2.6.32.42/arch/x86/kernel/entry_64.S 2011-06-04 20:30:53.000000000 -0400
13127 @@ -53,6 +53,7 @@
13128 #include <asm/paravirt.h>
13129 #include <asm/ftrace.h>
13130 #include <asm/percpu.h>
13131 +#include <asm/pgtable.h>
13132
13133 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13134 #include <linux/elf-em.h>
13135 @@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13136 ENDPROC(native_usergs_sysret64)
13137 #endif /* CONFIG_PARAVIRT */
13138
13139 + .macro ljmpq sel, off
13140 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13141 + .byte 0x48; ljmp *1234f(%rip)
13142 + .pushsection .rodata
13143 + .align 16
13144 + 1234: .quad \off; .word \sel
13145 + .popsection
13146 +#else
13147 + pushq $\sel
13148 + pushq $\off
13149 + lretq
13150 +#endif
13151 + .endm
13152 +
13153 + .macro pax_enter_kernel
13154 +#ifdef CONFIG_PAX_KERNEXEC
13155 + call pax_enter_kernel
13156 +#endif
13157 + .endm
13158 +
13159 + .macro pax_exit_kernel
13160 +#ifdef CONFIG_PAX_KERNEXEC
13161 + call pax_exit_kernel
13162 +#endif
13163 + .endm
13164 +
13165 +#ifdef CONFIG_PAX_KERNEXEC
13166 +ENTRY(pax_enter_kernel)
13167 + pushq %rdi
13168 +
13169 +#ifdef CONFIG_PARAVIRT
13170 + PV_SAVE_REGS(CLBR_RDI)
13171 +#endif
13172 +
13173 + GET_CR0_INTO_RDI
13174 + bts $16,%rdi
13175 + jnc 1f
13176 + mov %cs,%edi
13177 + cmp $__KERNEL_CS,%edi
13178 + jz 3f
13179 + ljmpq __KERNEL_CS,3f
13180 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
13181 +2: SET_RDI_INTO_CR0
13182 +3:
13183 +
13184 +#ifdef CONFIG_PARAVIRT
13185 + PV_RESTORE_REGS(CLBR_RDI)
13186 +#endif
13187 +
13188 + popq %rdi
13189 + retq
13190 +ENDPROC(pax_enter_kernel)
13191 +
13192 +ENTRY(pax_exit_kernel)
13193 + pushq %rdi
13194 +
13195 +#ifdef CONFIG_PARAVIRT
13196 + PV_SAVE_REGS(CLBR_RDI)
13197 +#endif
13198 +
13199 + mov %cs,%rdi
13200 + cmp $__KERNEXEC_KERNEL_CS,%edi
13201 + jnz 2f
13202 + GET_CR0_INTO_RDI
13203 + btr $16,%rdi
13204 + ljmpq __KERNEL_CS,1f
13205 +1: SET_RDI_INTO_CR0
13206 +2:
13207 +
13208 +#ifdef CONFIG_PARAVIRT
13209 + PV_RESTORE_REGS(CLBR_RDI);
13210 +#endif
13211 +
13212 + popq %rdi
13213 + retq
13214 +ENDPROC(pax_exit_kernel)
13215 +#endif
13216 +
13217 + .macro pax_enter_kernel_user
13218 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13219 + call pax_enter_kernel_user
13220 +#endif
13221 + .endm
13222 +
13223 + .macro pax_exit_kernel_user
13224 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13225 + call pax_exit_kernel_user
13226 +#endif
13227 +#ifdef CONFIG_PAX_RANDKSTACK
13228 + push %rax
13229 + call pax_randomize_kstack
13230 + pop %rax
13231 +#endif
13232 + pax_erase_kstack
13233 + .endm
13234 +
13235 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13236 +ENTRY(pax_enter_kernel_user)
13237 + pushq %rdi
13238 + pushq %rbx
13239 +
13240 +#ifdef CONFIG_PARAVIRT
13241 + PV_SAVE_REGS(CLBR_RDI)
13242 +#endif
13243 +
13244 + GET_CR3_INTO_RDI
13245 + mov %rdi,%rbx
13246 + add $__START_KERNEL_map,%rbx
13247 + sub phys_base(%rip),%rbx
13248 +
13249 +#ifdef CONFIG_PARAVIRT
13250 + pushq %rdi
13251 + cmpl $0, pv_info+PARAVIRT_enabled
13252 + jz 1f
13253 + i = 0
13254 + .rept USER_PGD_PTRS
13255 + mov i*8(%rbx),%rsi
13256 + mov $0,%sil
13257 + lea i*8(%rbx),%rdi
13258 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13259 + i = i + 1
13260 + .endr
13261 + jmp 2f
13262 +1:
13263 +#endif
13264 +
13265 + i = 0
13266 + .rept USER_PGD_PTRS
13267 + movb $0,i*8(%rbx)
13268 + i = i + 1
13269 + .endr
13270 +
13271 +#ifdef CONFIG_PARAVIRT
13272 +2: popq %rdi
13273 +#endif
13274 + SET_RDI_INTO_CR3
13275 +
13276 +#ifdef CONFIG_PAX_KERNEXEC
13277 + GET_CR0_INTO_RDI
13278 + bts $16,%rdi
13279 + SET_RDI_INTO_CR0
13280 +#endif
13281 +
13282 +#ifdef CONFIG_PARAVIRT
13283 + PV_RESTORE_REGS(CLBR_RDI)
13284 +#endif
13285 +
13286 + popq %rbx
13287 + popq %rdi
13288 + retq
13289 +ENDPROC(pax_enter_kernel_user)
13290 +
13291 +ENTRY(pax_exit_kernel_user)
13292 + push %rdi
13293 +
13294 +#ifdef CONFIG_PARAVIRT
13295 + pushq %rbx
13296 + PV_SAVE_REGS(CLBR_RDI)
13297 +#endif
13298 +
13299 +#ifdef CONFIG_PAX_KERNEXEC
13300 + GET_CR0_INTO_RDI
13301 + btr $16,%rdi
13302 + SET_RDI_INTO_CR0
13303 +#endif
13304 +
13305 + GET_CR3_INTO_RDI
13306 + add $__START_KERNEL_map,%rdi
13307 + sub phys_base(%rip),%rdi
13308 +
13309 +#ifdef CONFIG_PARAVIRT
13310 + cmpl $0, pv_info+PARAVIRT_enabled
13311 + jz 1f
13312 + mov %rdi,%rbx
13313 + i = 0
13314 + .rept USER_PGD_PTRS
13315 + mov i*8(%rbx),%rsi
13316 + mov $0x67,%sil
13317 + lea i*8(%rbx),%rdi
13318 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13319 + i = i + 1
13320 + .endr
13321 + jmp 2f
13322 +1:
13323 +#endif
13324 +
13325 + i = 0
13326 + .rept USER_PGD_PTRS
13327 + movb $0x67,i*8(%rdi)
13328 + i = i + 1
13329 + .endr
13330 +
13331 +#ifdef CONFIG_PARAVIRT
13332 +2: PV_RESTORE_REGS(CLBR_RDI)
13333 + popq %rbx
13334 +#endif
13335 +
13336 + popq %rdi
13337 + retq
13338 +ENDPROC(pax_exit_kernel_user)
13339 +#endif
13340 +
13341 +.macro pax_erase_kstack
13342 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13343 + call pax_erase_kstack
13344 +#endif
13345 +.endm
13346 +
13347 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13348 +/*
13349 + * r10: thread_info
13350 + * rcx, rdx: can be clobbered
13351 + */
13352 +ENTRY(pax_erase_kstack)
13353 + pushq %rdi
13354 + pushq %rax
13355 +
13356 + GET_THREAD_INFO(%r10)
13357 + mov TI_lowest_stack(%r10), %rdi
13358 + mov $-0xBEEF, %rax
13359 + std
13360 +
13361 +1: mov %edi, %ecx
13362 + and $THREAD_SIZE_asm - 1, %ecx
13363 + shr $3, %ecx
13364 + repne scasq
13365 + jecxz 2f
13366 +
13367 + cmp $2*8, %ecx
13368 + jc 2f
13369 +
13370 + mov $2*8, %ecx
13371 + repe scasq
13372 + jecxz 2f
13373 + jne 1b
13374 +
13375 +2: cld
13376 + mov %esp, %ecx
13377 + sub %edi, %ecx
13378 + shr $3, %ecx
13379 + rep stosq
13380 +
13381 + mov TI_task_thread_sp0(%r10), %rdi
13382 + sub $256, %rdi
13383 + mov %rdi, TI_lowest_stack(%r10)
13384 +
13385 + popq %rax
13386 + popq %rdi
13387 + ret
13388 +ENDPROC(pax_erase_kstack)
13389 +#endif
13390
13391 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13392 #ifdef CONFIG_TRACE_IRQFLAGS
13393 @@ -317,7 +569,7 @@ ENTRY(save_args)
13394 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13395 movq_cfi rbp, 8 /* push %rbp */
13396 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13397 - testl $3, CS(%rdi)
13398 + testb $3, CS(%rdi)
13399 je 1f
13400 SWAPGS
13401 /*
13402 @@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
13403
13404 RESTORE_REST
13405
13406 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13407 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13408 je int_ret_from_sys_call
13409
13410 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13411 @@ -455,7 +707,7 @@ END(ret_from_fork)
13412 ENTRY(system_call)
13413 CFI_STARTPROC simple
13414 CFI_SIGNAL_FRAME
13415 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13416 + CFI_DEF_CFA rsp,0
13417 CFI_REGISTER rip,rcx
13418 /*CFI_REGISTER rflags,r11*/
13419 SWAPGS_UNSAFE_STACK
13420 @@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
13421
13422 movq %rsp,PER_CPU_VAR(old_rsp)
13423 movq PER_CPU_VAR(kernel_stack),%rsp
13424 + pax_enter_kernel_user
13425 /*
13426 * No need to follow this irqs off/on section - it's straight
13427 * and short:
13428 */
13429 ENABLE_INTERRUPTS(CLBR_NONE)
13430 - SAVE_ARGS 8,1
13431 + SAVE_ARGS 8*6,1
13432 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13433 movq %rcx,RIP-ARGOFFSET(%rsp)
13434 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13435 @@ -502,6 +755,7 @@ sysret_check:
13436 andl %edi,%edx
13437 jnz sysret_careful
13438 CFI_REMEMBER_STATE
13439 + pax_exit_kernel_user
13440 /*
13441 * sysretq will re-enable interrupts:
13442 */
13443 @@ -562,6 +816,9 @@ auditsys:
13444 movq %rax,%rsi /* 2nd arg: syscall number */
13445 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13446 call audit_syscall_entry
13447 +
13448 + pax_erase_kstack
13449 +
13450 LOAD_ARGS 0 /* reload call-clobbered registers */
13451 jmp system_call_fastpath
13452
13453 @@ -592,6 +849,9 @@ tracesys:
13454 FIXUP_TOP_OF_STACK %rdi
13455 movq %rsp,%rdi
13456 call syscall_trace_enter
13457 +
13458 + pax_erase_kstack
13459 +
13460 /*
13461 * Reload arg registers from stack in case ptrace changed them.
13462 * We don't reload %rax because syscall_trace_enter() returned
13463 @@ -613,7 +873,7 @@ tracesys:
13464 GLOBAL(int_ret_from_sys_call)
13465 DISABLE_INTERRUPTS(CLBR_NONE)
13466 TRACE_IRQS_OFF
13467 - testl $3,CS-ARGOFFSET(%rsp)
13468 + testb $3,CS-ARGOFFSET(%rsp)
13469 je retint_restore_args
13470 movl $_TIF_ALLWORK_MASK,%edi
13471 /* edi: mask to check */
13472 @@ -800,6 +1060,16 @@ END(interrupt)
13473 CFI_ADJUST_CFA_OFFSET 10*8
13474 call save_args
13475 PARTIAL_FRAME 0
13476 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13477 + testb $3, CS(%rdi)
13478 + jnz 1f
13479 + pax_enter_kernel
13480 + jmp 2f
13481 +1: pax_enter_kernel_user
13482 +2:
13483 +#else
13484 + pax_enter_kernel
13485 +#endif
13486 call \func
13487 .endm
13488
13489 @@ -822,7 +1092,7 @@ ret_from_intr:
13490 CFI_ADJUST_CFA_OFFSET -8
13491 exit_intr:
13492 GET_THREAD_INFO(%rcx)
13493 - testl $3,CS-ARGOFFSET(%rsp)
13494 + testb $3,CS-ARGOFFSET(%rsp)
13495 je retint_kernel
13496
13497 /* Interrupt came from user space */
13498 @@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
13499 * The iretq could re-enable interrupts:
13500 */
13501 DISABLE_INTERRUPTS(CLBR_ANY)
13502 + pax_exit_kernel_user
13503 TRACE_IRQS_IRETQ
13504 SWAPGS
13505 jmp restore_args
13506
13507 retint_restore_args: /* return to kernel space */
13508 DISABLE_INTERRUPTS(CLBR_ANY)
13509 + pax_exit_kernel
13510 /*
13511 * The iretq could re-enable interrupts:
13512 */
13513 @@ -1032,6 +1304,16 @@ ENTRY(\sym)
13514 CFI_ADJUST_CFA_OFFSET 15*8
13515 call error_entry
13516 DEFAULT_FRAME 0
13517 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13518 + testb $3, CS(%rsp)
13519 + jnz 1f
13520 + pax_enter_kernel
13521 + jmp 2f
13522 +1: pax_enter_kernel_user
13523 +2:
13524 +#else
13525 + pax_enter_kernel
13526 +#endif
13527 movq %rsp,%rdi /* pt_regs pointer */
13528 xorl %esi,%esi /* no error code */
13529 call \do_sym
13530 @@ -1049,6 +1331,16 @@ ENTRY(\sym)
13531 subq $15*8, %rsp
13532 call save_paranoid
13533 TRACE_IRQS_OFF
13534 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13535 + testb $3, CS(%rsp)
13536 + jnz 1f
13537 + pax_enter_kernel
13538 + jmp 2f
13539 +1: pax_enter_kernel_user
13540 +2:
13541 +#else
13542 + pax_enter_kernel
13543 +#endif
13544 movq %rsp,%rdi /* pt_regs pointer */
13545 xorl %esi,%esi /* no error code */
13546 call \do_sym
13547 @@ -1066,9 +1358,24 @@ ENTRY(\sym)
13548 subq $15*8, %rsp
13549 call save_paranoid
13550 TRACE_IRQS_OFF
13551 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13552 + testb $3, CS(%rsp)
13553 + jnz 1f
13554 + pax_enter_kernel
13555 + jmp 2f
13556 +1: pax_enter_kernel_user
13557 +2:
13558 +#else
13559 + pax_enter_kernel
13560 +#endif
13561 movq %rsp,%rdi /* pt_regs pointer */
13562 xorl %esi,%esi /* no error code */
13563 - PER_CPU(init_tss, %rbp)
13564 +#ifdef CONFIG_SMP
13565 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
13566 + lea init_tss(%rbp), %rbp
13567 +#else
13568 + lea init_tss(%rip), %rbp
13569 +#endif
13570 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13571 call \do_sym
13572 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13573 @@ -1085,6 +1392,16 @@ ENTRY(\sym)
13574 CFI_ADJUST_CFA_OFFSET 15*8
13575 call error_entry
13576 DEFAULT_FRAME 0
13577 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13578 + testb $3, CS(%rsp)
13579 + jnz 1f
13580 + pax_enter_kernel
13581 + jmp 2f
13582 +1: pax_enter_kernel_user
13583 +2:
13584 +#else
13585 + pax_enter_kernel
13586 +#endif
13587 movq %rsp,%rdi /* pt_regs pointer */
13588 movq ORIG_RAX(%rsp),%rsi /* get error code */
13589 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13590 @@ -1104,6 +1421,16 @@ ENTRY(\sym)
13591 call save_paranoid
13592 DEFAULT_FRAME 0
13593 TRACE_IRQS_OFF
13594 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13595 + testb $3, CS(%rsp)
13596 + jnz 1f
13597 + pax_enter_kernel
13598 + jmp 2f
13599 +1: pax_enter_kernel_user
13600 +2:
13601 +#else
13602 + pax_enter_kernel
13603 +#endif
13604 movq %rsp,%rdi /* pt_regs pointer */
13605 movq ORIG_RAX(%rsp),%rsi /* get error code */
13606 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13607 @@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
13608 TRACE_IRQS_OFF
13609 testl %ebx,%ebx /* swapgs needed? */
13610 jnz paranoid_restore
13611 - testl $3,CS(%rsp)
13612 + testb $3,CS(%rsp)
13613 jnz paranoid_userspace
13614 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13615 + pax_exit_kernel
13616 + TRACE_IRQS_IRETQ 0
13617 + SWAPGS_UNSAFE_STACK
13618 + RESTORE_ALL 8
13619 + jmp irq_return
13620 +#endif
13621 paranoid_swapgs:
13622 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13623 + pax_exit_kernel_user
13624 +#else
13625 + pax_exit_kernel
13626 +#endif
13627 TRACE_IRQS_IRETQ 0
13628 SWAPGS_UNSAFE_STACK
13629 RESTORE_ALL 8
13630 jmp irq_return
13631 paranoid_restore:
13632 + pax_exit_kernel
13633 TRACE_IRQS_IRETQ 0
13634 RESTORE_ALL 8
13635 jmp irq_return
13636 @@ -1470,7 +1810,7 @@ ENTRY(error_entry)
13637 movq_cfi r14, R14+8
13638 movq_cfi r15, R15+8
13639 xorl %ebx,%ebx
13640 - testl $3,CS+8(%rsp)
13641 + testb $3,CS+8(%rsp)
13642 je error_kernelspace
13643 error_swapgs:
13644 SWAPGS
13645 @@ -1529,6 +1869,16 @@ ENTRY(nmi)
13646 CFI_ADJUST_CFA_OFFSET 15*8
13647 call save_paranoid
13648 DEFAULT_FRAME 0
13649 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13650 + testb $3, CS(%rsp)
13651 + jnz 1f
13652 + pax_enter_kernel
13653 + jmp 2f
13654 +1: pax_enter_kernel_user
13655 +2:
13656 +#else
13657 + pax_enter_kernel
13658 +#endif
13659 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13660 movq %rsp,%rdi
13661 movq $-1,%rsi
13662 @@ -1539,11 +1889,25 @@ ENTRY(nmi)
13663 DISABLE_INTERRUPTS(CLBR_NONE)
13664 testl %ebx,%ebx /* swapgs needed? */
13665 jnz nmi_restore
13666 - testl $3,CS(%rsp)
13667 + testb $3,CS(%rsp)
13668 jnz nmi_userspace
13669 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13670 + pax_exit_kernel
13671 + SWAPGS_UNSAFE_STACK
13672 + RESTORE_ALL 8
13673 + jmp irq_return
13674 +#endif
13675 nmi_swapgs:
13676 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13677 + pax_exit_kernel_user
13678 +#else
13679 + pax_exit_kernel
13680 +#endif
13681 SWAPGS_UNSAFE_STACK
13682 + RESTORE_ALL 8
13683 + jmp irq_return
13684 nmi_restore:
13685 + pax_exit_kernel
13686 RESTORE_ALL 8
13687 jmp irq_return
13688 nmi_userspace:
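
The amd64 side adds the UDEREF entry/exit pair on top of the KERNEXEC one: pax_enter_kernel_user walks the userland portion of the current PGD and clears the low attribute byte of every slot (dropping _PAGE_PRESENT, so user pages are unreachable while the kernel runs), and pax_exit_kernel_user writes the 0x67 attribute byte back on the way out. A hedged C rendition of the non-paravirt byte writes above (USER_PGD_PTRS is a constant the patch introduces; the helper names are made up):

#include <asm/pgtable.h>

/* Sketch of pax_enter_kernel_user's effect: knock out the present bit (and
 * the rest of the low attribute byte) of every userland PGD entry. */
static void uderef_hide_userland(pgd_t *pgd)
{
	unsigned int i;

	for (i = 0; i < USER_PGD_PTRS; i++)
		((unsigned char *)&pgd[i])[0] = 0;
}

/* Sketch of pax_exit_kernel_user's effect: restore the 0x67 attribute byte
 * (PRESENT | RW | USER | ACCESSED | DIRTY) so userland is mapped again. */
static void uderef_expose_userland(pgd_t *pgd)
{
	unsigned int i;

	for (i = 0; i < USER_PGD_PTRS; i++)
		((unsigned char *)&pgd[i])[0] = 0x67;
}

The assembly also folds a CR0.WP toggle into the same points when KERNEXEC is enabled, and the paravirt path routes the per-entry updates through pv_mmu_ops set_pgd calls instead of raw byte stores.
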
13689 diff -urNp linux-2.6.32.42/arch/x86/kernel/ftrace.c linux-2.6.32.42/arch/x86/kernel/ftrace.c
13690 --- linux-2.6.32.42/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
13691 +++ linux-2.6.32.42/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
13692 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
13693 static void *mod_code_newcode; /* holds the text to write to the IP */
13694
13695 static unsigned nmi_wait_count;
13696 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
13697 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13698
13699 int ftrace_arch_read_dyn_info(char *buf, int size)
13700 {
13701 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13702
13703 r = snprintf(buf, size, "%u %u",
13704 nmi_wait_count,
13705 - atomic_read(&nmi_update_count));
13706 + atomic_read_unchecked(&nmi_update_count));
13707 return r;
13708 }
13709
13710 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
13711 {
13712 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13713 smp_rmb();
13714 + pax_open_kernel();
13715 ftrace_mod_code();
13716 - atomic_inc(&nmi_update_count);
13717 + pax_close_kernel();
13718 + atomic_inc_unchecked(&nmi_update_count);
13719 }
13720 /* Must have previous changes seen before executions */
13721 smp_mb();
13722 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
13723
13724
13725
13726 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
13727 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
13728
13729 static unsigned char *ftrace_nop_replace(void)
13730 {
13731 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
13732 {
13733 unsigned char replaced[MCOUNT_INSN_SIZE];
13734
13735 + ip = ktla_ktva(ip);
13736 +
13737 /*
13738 * Note: Due to modules and __init, code can
13739 * disappear and change, we need to protect against faulting
13740 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13741 unsigned char old[MCOUNT_INSN_SIZE], *new;
13742 int ret;
13743
13744 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13745 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13746 new = ftrace_call_replace(ip, (unsigned long)func);
13747 ret = ftrace_modify_code(ip, old, new);
13748
13749 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
13750 switch (faulted) {
13751 case 0:
13752 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
13753 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
13754 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
13755 break;
13756 case 1:
13757 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
13758 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
13759 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
13760 break;
13761 case 2:
13762 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
13763 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
13764 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
13765 break;
13766 }
13767
13768 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
13769 {
13770 unsigned char code[MCOUNT_INSN_SIZE];
13771
13772 + ip = ktla_ktva(ip);
13773 +
13774 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
13775 return -EFAULT;
13776
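Two recurring grsecurity idioms show up in the ftrace changes. Code patching now goes through ktla_ktva() and, in the NMI path, a pax_open_kernel()/pax_close_kernel() pair - roughly speaking, ktla_ktva() translates a kernel text symbol address to the address at which those bytes can actually be read under KERNEXEC's relocated text mapping, while the open/close pair brackets the write itself now that kernel text is otherwise read-only. And nmi_update_count becomes an atomic_unchecked_t: under PAX_REFCOUNT, plain atomic_t overflows are trapped, so counters that are pure statistics (where wrapping is harmless) are switched to the _unchecked variants. A minimal sketch of that second idiom (the counter and helpers here are illustrative):

#include <asm/atomic.h>

/* A statistics-only counter: wrapping is acceptable, so it is excluded from
 * PAX_REFCOUNT's overflow detection by using the _unchecked type/accessors. */
static atomic_unchecked_t stat_events = ATOMIC_INIT(0);

static void note_event(void)
{
	atomic_inc_unchecked(&stat_events);
}

static int read_events(void)
{
	return atomic_read_unchecked(&stat_events);
}
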
13777 diff -urNp linux-2.6.32.42/arch/x86/kernel/head32.c linux-2.6.32.42/arch/x86/kernel/head32.c
13778 --- linux-2.6.32.42/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
13779 +++ linux-2.6.32.42/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
13780 @@ -16,6 +16,7 @@
13781 #include <asm/apic.h>
13782 #include <asm/io_apic.h>
13783 #include <asm/bios_ebda.h>
13784 +#include <asm/boot.h>
13785
13786 static void __init i386_default_early_setup(void)
13787 {
13788 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
13789 {
13790 reserve_trampoline_memory();
13791
13792 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13793 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13794
13795 #ifdef CONFIG_BLK_DEV_INITRD
13796 /* Reserve INITRD */
13797 diff -urNp linux-2.6.32.42/arch/x86/kernel/head_32.S linux-2.6.32.42/arch/x86/kernel/head_32.S
13798 --- linux-2.6.32.42/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
13799 +++ linux-2.6.32.42/arch/x86/kernel/head_32.S 2011-04-17 15:56:46.000000000 -0400
13800 @@ -19,10 +19,17 @@
13801 #include <asm/setup.h>
13802 #include <asm/processor-flags.h>
13803 #include <asm/percpu.h>
13804 +#include <asm/msr-index.h>
13805
13806 /* Physical address */
13807 #define pa(X) ((X) - __PAGE_OFFSET)
13808
13809 +#ifdef CONFIG_PAX_KERNEXEC
13810 +#define ta(X) (X)
13811 +#else
13812 +#define ta(X) ((X) - __PAGE_OFFSET)
13813 +#endif
13814 +
13815 /*
13816 * References to members of the new_cpu_data structure.
13817 */
13818 @@ -52,11 +59,7 @@
13819 * and small than max_low_pfn, otherwise will waste some page table entries
13820 */
13821
13822 -#if PTRS_PER_PMD > 1
13823 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
13824 -#else
13825 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
13826 -#endif
13827 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
13828
13829 /* Enough space to fit pagetables for the low memory linear map */
13830 MAPPING_BEYOND_END = \
13831 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
13832 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13833
13834 /*
13835 + * Real beginning of normal "text" segment
13836 + */
13837 +ENTRY(stext)
13838 +ENTRY(_stext)
13839 +
13840 +/*
13841 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
13842 * %esi points to the real-mode code as a 32-bit pointer.
13843 * CS and DS must be 4 GB flat segments, but we don't depend on
13844 @@ -80,6 +89,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13845 * can.
13846 */
13847 __HEAD
13848 +
13849 +#ifdef CONFIG_PAX_KERNEXEC
13850 + jmp startup_32
13851 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
13852 +.fill PAGE_SIZE-5,1,0xcc
13853 +#endif
13854 +
13855 ENTRY(startup_32)
13856 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
13857 us to not reload segments */
13858 @@ -97,6 +113,57 @@ ENTRY(startup_32)
13859 movl %eax,%gs
13860 2:
13861
13862 +#ifdef CONFIG_SMP
13863 + movl $pa(cpu_gdt_table),%edi
13864 + movl $__per_cpu_load,%eax
13865 + movw %ax,__KERNEL_PERCPU + 2(%edi)
13866 + rorl $16,%eax
13867 + movb %al,__KERNEL_PERCPU + 4(%edi)
13868 + movb %ah,__KERNEL_PERCPU + 7(%edi)
13869 + movl $__per_cpu_end - 1,%eax
13870 + subl $__per_cpu_start,%eax
13871 + movw %ax,__KERNEL_PERCPU + 0(%edi)
13872 +#endif
13873 +
13874 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13875 + movl $NR_CPUS,%ecx
13876 + movl $pa(cpu_gdt_table),%edi
13877 +1:
13878 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
13879 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
13880 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
13881 + addl $PAGE_SIZE_asm,%edi
13882 + loop 1b
13883 +#endif
13884 +
13885 +#ifdef CONFIG_PAX_KERNEXEC
13886 + movl $pa(boot_gdt),%edi
13887 + movl $__LOAD_PHYSICAL_ADDR,%eax
13888 + movw %ax,__BOOT_CS + 2(%edi)
13889 + rorl $16,%eax
13890 + movb %al,__BOOT_CS + 4(%edi)
13891 + movb %ah,__BOOT_CS + 7(%edi)
13892 + rorl $16,%eax
13893 +
13894 + ljmp $(__BOOT_CS),$1f
13895 +1:
13896 +
13897 + movl $NR_CPUS,%ecx
13898 + movl $pa(cpu_gdt_table),%edi
13899 + addl $__PAGE_OFFSET,%eax
13900 +1:
13901 + movw %ax,__KERNEL_CS + 2(%edi)
13902 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
13903 + rorl $16,%eax
13904 + movb %al,__KERNEL_CS + 4(%edi)
13905 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
13906 + movb %ah,__KERNEL_CS + 7(%edi)
13907 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
13908 + rorl $16,%eax
13909 + addl $PAGE_SIZE_asm,%edi
13910 + loop 1b
13911 +#endif
13912 +
13913 /*
13914 * Clear BSS first so that there are no surprises...
13915 */
13916 @@ -140,9 +207,7 @@ ENTRY(startup_32)
13917 cmpl $num_subarch_entries, %eax
13918 jae bad_subarch
13919
13920 - movl pa(subarch_entries)(,%eax,4), %eax
13921 - subl $__PAGE_OFFSET, %eax
13922 - jmp *%eax
13923 + jmp *pa(subarch_entries)(,%eax,4)
13924
13925 bad_subarch:
13926 WEAK(lguest_entry)
13927 @@ -154,10 +219,10 @@ WEAK(xen_entry)
13928 __INITDATA
13929
13930 subarch_entries:
13931 - .long default_entry /* normal x86/PC */
13932 - .long lguest_entry /* lguest hypervisor */
13933 - .long xen_entry /* Xen hypervisor */
13934 - .long default_entry /* Moorestown MID */
13935 + .long ta(default_entry) /* normal x86/PC */
13936 + .long ta(lguest_entry) /* lguest hypervisor */
13937 + .long ta(xen_entry) /* Xen hypervisor */
13938 + .long ta(default_entry) /* Moorestown MID */
13939 num_subarch_entries = (. - subarch_entries) / 4
13940 .previous
13941 #endif /* CONFIG_PARAVIRT */
13942 @@ -218,8 +283,11 @@ default_entry:
13943 movl %eax, pa(max_pfn_mapped)
13944
13945 /* Do early initialization of the fixmap area */
13946 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
13947 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13948 +#ifdef CONFIG_COMPAT_VDSO
13949 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13950 +#else
13951 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13952 +#endif
13953 #else /* Not PAE */
13954
13955 page_pde_offset = (__PAGE_OFFSET >> 20);
13956 @@ -249,8 +317,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13957 movl %eax, pa(max_pfn_mapped)
13958
13959 /* Do early initialization of the fixmap area */
13960 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
13961 - movl %eax,pa(swapper_pg_dir+0xffc)
13962 +#ifdef CONFIG_COMPAT_VDSO
13963 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
13964 +#else
13965 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
13966 +#endif
13967 #endif
13968 jmp 3f
13969 /*
13970 @@ -297,6 +368,7 @@ ENTRY(startup_32_smp)
13971 orl %edx,%eax
13972 movl %eax,%cr4
13973
13974 +#ifdef CONFIG_X86_PAE
13975 btl $5, %eax # check if PAE is enabled
13976 jnc 6f
13977
13978 @@ -312,13 +384,17 @@ ENTRY(startup_32_smp)
13979 jnc 6f
13980
13981 /* Setup EFER (Extended Feature Enable Register) */
13982 - movl $0xc0000080, %ecx
13983 + movl $MSR_EFER, %ecx
13984 rdmsr
13985
13986 btsl $11, %eax
13987 /* Make changes effective */
13988 wrmsr
13989
13990 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
13991 + movl $1,pa(nx_enabled)
13992 +#endif
13993 +
13994 6:
13995
13996 /*
13997 @@ -344,9 +420,7 @@ ENTRY(startup_32_smp)
13998
13999 #ifdef CONFIG_SMP
14000 cmpb $0, ready
14001 - jz 1f /* Initial CPU cleans BSS */
14002 - jmp checkCPUtype
14003 -1:
14004 + jnz checkCPUtype /* Initial CPU cleans BSS */
14005 #endif /* CONFIG_SMP */
14006
14007 /*
14008 @@ -424,7 +498,7 @@ is386: movl $2,%ecx # set MP
14009 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14010 movl %eax,%ss # after changing gdt.
14011
14012 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14013 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14014 movl %eax,%ds
14015 movl %eax,%es
14016
14017 @@ -438,15 +512,22 @@ is386: movl $2,%ecx # set MP
14018 */
14019 cmpb $0,ready
14020 jne 1f
14021 - movl $per_cpu__gdt_page,%eax
14022 + movl $cpu_gdt_table,%eax
14023 movl $per_cpu__stack_canary,%ecx
14024 +#ifdef CONFIG_SMP
14025 + addl $__per_cpu_load,%ecx
14026 +#endif
14027 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14028 shrl $16, %ecx
14029 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14030 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14031 1:
14032 -#endif
14033 movl $(__KERNEL_STACK_CANARY),%eax
14034 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14035 + movl $(__USER_DS),%eax
14036 +#else
14037 + xorl %eax,%eax
14038 +#endif
14039 movl %eax,%gs
14040
14041 xorl %eax,%eax # Clear LDT
14042 @@ -457,10 +538,6 @@ is386: movl $2,%ecx # set MP
14043 #ifdef CONFIG_SMP
14044 movb ready, %cl
14045 movb $1, ready
14046 - cmpb $0,%cl # the first CPU calls start_kernel
14047 - je 1f
14048 - movl (stack_start), %esp
14049 -1:
14050 #endif /* CONFIG_SMP */
14051 jmp *(initial_code)
14052
14053 @@ -546,22 +623,22 @@ early_page_fault:
14054 jmp early_fault
14055
14056 early_fault:
14057 - cld
14058 #ifdef CONFIG_PRINTK
14059 + cmpl $1,%ss:early_recursion_flag
14060 + je hlt_loop
14061 + incl %ss:early_recursion_flag
14062 + cld
14063 pusha
14064 movl $(__KERNEL_DS),%eax
14065 movl %eax,%ds
14066 movl %eax,%es
14067 - cmpl $2,early_recursion_flag
14068 - je hlt_loop
14069 - incl early_recursion_flag
14070 movl %cr2,%eax
14071 pushl %eax
14072 pushl %edx /* trapno */
14073 pushl $fault_msg
14074 call printk
14075 +; call dump_stack
14076 #endif
14077 - call dump_stack
14078 hlt_loop:
14079 hlt
14080 jmp hlt_loop
14081 @@ -569,8 +646,11 @@ hlt_loop:
14082 /* This is the default interrupt "handler" :-) */
14083 ALIGN
14084 ignore_int:
14085 - cld
14086 #ifdef CONFIG_PRINTK
14087 + cmpl $2,%ss:early_recursion_flag
14088 + je hlt_loop
14089 + incl %ss:early_recursion_flag
14090 + cld
14091 pushl %eax
14092 pushl %ecx
14093 pushl %edx
14094 @@ -579,9 +659,6 @@ ignore_int:
14095 movl $(__KERNEL_DS),%eax
14096 movl %eax,%ds
14097 movl %eax,%es
14098 - cmpl $2,early_recursion_flag
14099 - je hlt_loop
14100 - incl early_recursion_flag
14101 pushl 16(%esp)
14102 pushl 24(%esp)
14103 pushl 32(%esp)
14104 @@ -610,31 +687,47 @@ ENTRY(initial_page_table)
14105 /*
14106 * BSS section
14107 */
14108 -__PAGE_ALIGNED_BSS
14109 - .align PAGE_SIZE_asm
14110 #ifdef CONFIG_X86_PAE
14111 +.section .swapper_pg_pmd,"a",@progbits
14112 swapper_pg_pmd:
14113 .fill 1024*KPMDS,4,0
14114 #else
14115 +.section .swapper_pg_dir,"a",@progbits
14116 ENTRY(swapper_pg_dir)
14117 .fill 1024,4,0
14118 #endif
14119 +.section .swapper_pg_fixmap,"a",@progbits
14120 swapper_pg_fixmap:
14121 .fill 1024,4,0
14122 #ifdef CONFIG_X86_TRAMPOLINE
14123 +.section .trampoline_pg_dir,"a",@progbits
14124 ENTRY(trampoline_pg_dir)
14125 +#ifdef CONFIG_X86_PAE
14126 + .fill 4,8,0
14127 +#else
14128 .fill 1024,4,0
14129 #endif
14130 +#endif
14131 +
14132 +.section .empty_zero_page,"a",@progbits
14133 ENTRY(empty_zero_page)
14134 .fill 4096,1,0
14135
14136 /*
14137 + * The IDT has to be page-aligned to simplify the Pentium
14138 + * F0 0F bug workaround.. We have a special link segment
14139 + * for this.
14140 + */
14141 +.section .idt,"a",@progbits
14142 +ENTRY(idt_table)
14143 + .fill 256,8,0
14144 +
14145 +/*
14146 * This starts the data section.
14147 */
14148 #ifdef CONFIG_X86_PAE
14149 -__PAGE_ALIGNED_DATA
14150 - /* Page-aligned for the benefit of paravirt? */
14151 - .align PAGE_SIZE_asm
14152 +.section .swapper_pg_dir,"a",@progbits
14153 +
14154 ENTRY(swapper_pg_dir)
14155 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14156 # if KPMDS == 3
14157 @@ -653,15 +746,24 @@ ENTRY(swapper_pg_dir)
14158 # error "Kernel PMDs should be 1, 2 or 3"
14159 # endif
14160 .align PAGE_SIZE_asm /* needs to be page-sized too */
14161 +
14162 +#ifdef CONFIG_PAX_PER_CPU_PGD
14163 +ENTRY(cpu_pgd)
14164 + .rept NR_CPUS
14165 + .fill 4,8,0
14166 + .endr
14167 +#endif
14168 +
14169 #endif
14170
14171 .data
14172 ENTRY(stack_start)
14173 - .long init_thread_union+THREAD_SIZE
14174 + .long init_thread_union+THREAD_SIZE-8
14175 .long __BOOT_DS
14176
14177 ready: .byte 0
14178
14179 +.section .rodata,"a",@progbits
14180 early_recursion_flag:
14181 .long 0
14182
14183 @@ -697,7 +799,7 @@ fault_msg:
14184 .word 0 # 32 bit align gdt_desc.address
14185 boot_gdt_descr:
14186 .word __BOOT_DS+7
14187 - .long boot_gdt - __PAGE_OFFSET
14188 + .long pa(boot_gdt)
14189
14190 .word 0 # 32-bit align idt_desc.address
14191 idt_descr:
14192 @@ -708,7 +810,7 @@ idt_descr:
14193 .word 0 # 32 bit align gdt_desc.address
14194 ENTRY(early_gdt_descr)
14195 .word GDT_ENTRIES*8-1
14196 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14197 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14198
14199 /*
14200 * The boot_gdt must mirror the equivalent in setup.S and is
14201 @@ -717,5 +819,65 @@ ENTRY(early_gdt_descr)
14202 .align L1_CACHE_BYTES
14203 ENTRY(boot_gdt)
14204 .fill GDT_ENTRY_BOOT_CS,8,0
14205 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14206 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14207 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14208 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14209 +
14210 + .align PAGE_SIZE_asm
14211 +ENTRY(cpu_gdt_table)
14212 + .rept NR_CPUS
14213 + .quad 0x0000000000000000 /* NULL descriptor */
14214 + .quad 0x0000000000000000 /* 0x0b reserved */
14215 + .quad 0x0000000000000000 /* 0x13 reserved */
14216 + .quad 0x0000000000000000 /* 0x1b reserved */
14217 +
14218 +#ifdef CONFIG_PAX_KERNEXEC
14219 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14220 +#else
14221 + .quad 0x0000000000000000 /* 0x20 unused */
14222 +#endif
14223 +
14224 + .quad 0x0000000000000000 /* 0x28 unused */
14225 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14226 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14227 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14228 + .quad 0x0000000000000000 /* 0x4b reserved */
14229 + .quad 0x0000000000000000 /* 0x53 reserved */
14230 + .quad 0x0000000000000000 /* 0x5b reserved */
14231 +
14232 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14233 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14234 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14235 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14236 +
14237 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14238 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14239 +
14240 + /*
14241 + * Segments used for calling PnP BIOS have byte granularity.
14242 + * The code segments and data segments have fixed 64k limits,
14243 + * the transfer segment sizes are set at run time.
14244 + */
14245 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14246 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14247 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14248 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14249 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14250 +
14251 + /*
14252 + * The APM segments have byte granularity and their bases
14253 + * are set at run time. All have 64k limits.
14254 + */
14255 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14256 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14257 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14258 +
14259 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14260 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14261 + .quad 0x0040910000000018 /* 0xe0 - STACK_CANARY */
14262 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14263 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14264 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14265 +
14266 + /* Be sure this is zeroed to avoid false validations in Xen */
14267 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14268 + .endr
14269 diff -urNp linux-2.6.32.42/arch/x86/kernel/head_64.S linux-2.6.32.42/arch/x86/kernel/head_64.S
14270 --- linux-2.6.32.42/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14271 +++ linux-2.6.32.42/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14272 @@ -19,6 +19,7 @@
14273 #include <asm/cache.h>
14274 #include <asm/processor-flags.h>
14275 #include <asm/percpu.h>
14276 +#include <asm/cpufeature.h>
14277
14278 #ifdef CONFIG_PARAVIRT
14279 #include <asm/asm-offsets.h>
14280 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14281 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14282 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14283 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14284 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
14285 +L3_VMALLOC_START = pud_index(VMALLOC_START)
14286 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14287 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14288
14289 .text
14290 __HEAD
14291 @@ -85,35 +90,22 @@ startup_64:
14292 */
14293 addq %rbp, init_level4_pgt + 0(%rip)
14294 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14295 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14296 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14297 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14298
14299 addq %rbp, level3_ident_pgt + 0(%rip)
14300 +#ifndef CONFIG_XEN
14301 + addq %rbp, level3_ident_pgt + 8(%rip)
14302 +#endif
14303
14304 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14305 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14306 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14307
14308 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14309 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14310 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14311
14312 - /* Add an Identity mapping if I am above 1G */
14313 - leaq _text(%rip), %rdi
14314 - andq $PMD_PAGE_MASK, %rdi
14315 -
14316 - movq %rdi, %rax
14317 - shrq $PUD_SHIFT, %rax
14318 - andq $(PTRS_PER_PUD - 1), %rax
14319 - jz ident_complete
14320 -
14321 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14322 - leaq level3_ident_pgt(%rip), %rbx
14323 - movq %rdx, 0(%rbx, %rax, 8)
14324 -
14325 - movq %rdi, %rax
14326 - shrq $PMD_SHIFT, %rax
14327 - andq $(PTRS_PER_PMD - 1), %rax
14328 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14329 - leaq level2_spare_pgt(%rip), %rbx
14330 - movq %rdx, 0(%rbx, %rax, 8)
14331 -ident_complete:
14332 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14333 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14334
14335 /*
14336 * Fixup the kernel text+data virtual addresses. Note that
14337 @@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14338 * after the boot processor executes this code.
14339 */
14340
14341 - /* Enable PAE mode and PGE */
14342 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14343 + /* Enable PAE mode and PSE/PGE */
14344 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14345 movq %rax, %cr4
14346
14347 /* Setup early boot stage 4 level pagetables. */
14348 @@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14349 movl $MSR_EFER, %ecx
14350 rdmsr
14351 btsl $_EFER_SCE, %eax /* Enable System Call */
14352 - btl $20,%edi /* No Execute supported? */
14353 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14354 jnc 1f
14355 btsl $_EFER_NX, %eax
14356 + leaq init_level4_pgt(%rip), %rdi
14357 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14358 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14359 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14360 1: wrmsr /* Make changes effective */
14361
14362 /* Setup cr0 */
14363 @@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14364 .quad x86_64_start_kernel
14365 ENTRY(initial_gs)
14366 .quad INIT_PER_CPU_VAR(irq_stack_union)
14367 - __FINITDATA
14368
14369 ENTRY(stack_start)
14370 .quad init_thread_union+THREAD_SIZE-8
14371 .word 0
14372 + __FINITDATA
14373
14374 bad_address:
14375 jmp bad_address
14376
14377 - .section ".init.text","ax"
14378 + __INIT
14379 #ifdef CONFIG_EARLY_PRINTK
14380 .globl early_idt_handlers
14381 early_idt_handlers:
14382 @@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
14383 #endif /* EARLY_PRINTK */
14384 1: hlt
14385 jmp 1b
14386 + .previous
14387
14388 #ifdef CONFIG_EARLY_PRINTK
14389 + __INITDATA
14390 early_recursion_flag:
14391 .long 0
14392 + .previous
14393
14394 + .section .rodata,"a",@progbits
14395 early_idt_msg:
14396 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14397 early_idt_ripmsg:
14398 .asciz "RIP %s\n"
14399 -#endif /* CONFIG_EARLY_PRINTK */
14400 .previous
14401 +#endif /* CONFIG_EARLY_PRINTK */
14402
14403 + .section .rodata,"a",@progbits
14404 #define NEXT_PAGE(name) \
14405 .balign PAGE_SIZE; \
14406 ENTRY(name)
14407 @@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
14408 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14409 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14410 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14411 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
14412 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14413 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14414 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14415 .org init_level4_pgt + L4_START_KERNEL*8, 0
14416 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14417 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14418
14419 +#ifdef CONFIG_PAX_PER_CPU_PGD
14420 +NEXT_PAGE(cpu_pgd)
14421 + .rept NR_CPUS
14422 + .fill 512,8,0
14423 + .endr
14424 +#endif
14425 +
14426 NEXT_PAGE(level3_ident_pgt)
14427 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14428 +#ifdef CONFIG_XEN
14429 .fill 511,8,0
14430 +#else
14431 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14432 + .fill 510,8,0
14433 +#endif
14434 +
14435 +NEXT_PAGE(level3_vmalloc_pgt)
14436 + .fill 512,8,0
14437 +
14438 +NEXT_PAGE(level3_vmemmap_pgt)
14439 + .fill L3_VMEMMAP_START,8,0
14440 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14441
14442 NEXT_PAGE(level3_kernel_pgt)
14443 .fill L3_START_KERNEL,8,0
14444 @@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
14445 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14446 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14447
14448 +NEXT_PAGE(level2_vmemmap_pgt)
14449 + .fill 512,8,0
14450 +
14451 NEXT_PAGE(level2_fixmap_pgt)
14452 - .fill 506,8,0
14453 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14454 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14455 - .fill 5,8,0
14456 + .fill 507,8,0
14457 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14458 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14459 + .fill 4,8,0
14460
14461 -NEXT_PAGE(level1_fixmap_pgt)
14462 +NEXT_PAGE(level1_vsyscall_pgt)
14463 .fill 512,8,0
14464
14465 -NEXT_PAGE(level2_ident_pgt)
14466 - /* Since I easily can, map the first 1G.
14467 + /* Since I easily can, map the first 2G.
14468 * Don't set NX because code runs from these pages.
14469 */
14470 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14471 +NEXT_PAGE(level2_ident_pgt)
14472 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14473
14474 NEXT_PAGE(level2_kernel_pgt)
14475 /*
14476 @@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
14477 * If you want to increase this then increase MODULES_VADDR
14478 * too.)
14479 */
14480 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14481 - KERNEL_IMAGE_SIZE/PMD_SIZE)
14482 -
14483 -NEXT_PAGE(level2_spare_pgt)
14484 - .fill 512, 8, 0
14485 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14486
14487 #undef PMDS
14488 #undef NEXT_PAGE
14489
14490 - .data
14491 + .align PAGE_SIZE
14492 +ENTRY(cpu_gdt_table)
14493 + .rept NR_CPUS
14494 + .quad 0x0000000000000000 /* NULL descriptor */
14495 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14496 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
14497 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
14498 + .quad 0x00cffb000000ffff /* __USER32_CS */
14499 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14500 + .quad 0x00affb000000ffff /* __USER_CS */
14501 +
14502 +#ifdef CONFIG_PAX_KERNEXEC
14503 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14504 +#else
14505 + .quad 0x0 /* unused */
14506 +#endif
14507 +
14508 + .quad 0,0 /* TSS */
14509 + .quad 0,0 /* LDT */
14510 + .quad 0,0,0 /* three TLS descriptors */
14511 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
14512 + /* asm/segment.h:GDT_ENTRIES must match this */
14513 +
14514 + /* zero the remaining page */
14515 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14516 + .endr
14517 +
14518 .align 16
14519 .globl early_gdt_descr
14520 early_gdt_descr:
14521 .word GDT_ENTRIES*8-1
14522 early_gdt_descr_base:
14523 - .quad INIT_PER_CPU_VAR(gdt_page)
14524 + .quad cpu_gdt_table
14525
14526 ENTRY(phys_base)
14527 /* This must match the first entry in level2_kernel_pgt */
14528 .quad 0x0000000000000000
14529
14530 #include "../../x86/xen/xen-head.S"
14531 -
14532 - .section .bss, "aw", @nobits
14533 +
14534 + .section .rodata,"a",@progbits
14535 .align L1_CACHE_BYTES
14536 ENTRY(idt_table)
14537 - .skip IDT_ENTRIES * 16
14538 + .fill 512,8,0
14539
14540 __PAGE_ALIGNED_BSS
14541 .align PAGE_SIZE
14542 diff -urNp linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c
14543 --- linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
14544 +++ linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
14545 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
14546 EXPORT_SYMBOL(cmpxchg8b_emu);
14547 #endif
14548
14549 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
14550 +
14551 /* Networking helper routines. */
14552 EXPORT_SYMBOL(csum_partial_copy_generic);
14553 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
14554 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
14555
14556 EXPORT_SYMBOL(__get_user_1);
14557 EXPORT_SYMBOL(__get_user_2);
14558 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
14559
14560 EXPORT_SYMBOL(csum_partial);
14561 EXPORT_SYMBOL(empty_zero_page);
14562 +
14563 +#ifdef CONFIG_PAX_KERNEXEC
14564 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
14565 +#endif
14566 diff -urNp linux-2.6.32.42/arch/x86/kernel/i8259.c linux-2.6.32.42/arch/x86/kernel/i8259.c
14567 --- linux-2.6.32.42/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
14568 +++ linux-2.6.32.42/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
14569 @@ -208,7 +208,7 @@ spurious_8259A_irq:
14570 "spurious 8259A interrupt: IRQ%d.\n", irq);
14571 spurious_irq_mask |= irqmask;
14572 }
14573 - atomic_inc(&irq_err_count);
14574 + atomic_inc_unchecked(&irq_err_count);
14575 /*
14576 * Theoretically we do not have to handle this IRQ,
14577 * but in Linux this does not cause problems and is
14578 diff -urNp linux-2.6.32.42/arch/x86/kernel/init_task.c linux-2.6.32.42/arch/x86/kernel/init_task.c
14579 --- linux-2.6.32.42/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
14580 +++ linux-2.6.32.42/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
14581 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
14582 * way process stacks are handled. This is done by having a special
14583 * "init_task" linker map entry..
14584 */
14585 -union thread_union init_thread_union __init_task_data =
14586 - { INIT_THREAD_INFO(init_task) };
14587 +union thread_union init_thread_union __init_task_data;
14588
14589 /*
14590 * Initial task structure.
14591 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
14592 * section. Since TSS's are completely CPU-local, we want them
14593 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
14594 */
14595 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
14596 -
14597 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
14598 +EXPORT_SYMBOL(init_tss);
14599 diff -urNp linux-2.6.32.42/arch/x86/kernel/ioport.c linux-2.6.32.42/arch/x86/kernel/ioport.c
14600 --- linux-2.6.32.42/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
14601 +++ linux-2.6.32.42/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
14602 @@ -6,6 +6,7 @@
14603 #include <linux/sched.h>
14604 #include <linux/kernel.h>
14605 #include <linux/capability.h>
14606 +#include <linux/security.h>
14607 #include <linux/errno.h>
14608 #include <linux/types.h>
14609 #include <linux/ioport.h>
14610 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
14611
14612 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
14613 return -EINVAL;
14614 +#ifdef CONFIG_GRKERNSEC_IO
14615 + if (turn_on && grsec_disable_privio) {
14616 + gr_handle_ioperm();
14617 + return -EPERM;
14618 + }
14619 +#endif
14620 if (turn_on && !capable(CAP_SYS_RAWIO))
14621 return -EPERM;
14622
14623 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
14624 * because the ->io_bitmap_max value must match the bitmap
14625 * contents:
14626 */
14627 - tss = &per_cpu(init_tss, get_cpu());
14628 + tss = init_tss + get_cpu();
14629
14630 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
14631
14632 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
14633 return -EINVAL;
14634 /* Trying to gain more privileges? */
14635 if (level > old) {
14636 +#ifdef CONFIG_GRKERNSEC_IO
14637 + if (grsec_disable_privio) {
14638 + gr_handle_iopl();
14639 + return -EPERM;
14640 + }
14641 +#endif
14642 if (!capable(CAP_SYS_RAWIO))
14643 return -EPERM;
14644 }
14645 diff -urNp linux-2.6.32.42/arch/x86/kernel/irq_32.c linux-2.6.32.42/arch/x86/kernel/irq_32.c
14646 --- linux-2.6.32.42/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
14647 +++ linux-2.6.32.42/arch/x86/kernel/irq_32.c 2011-04-23 13:26:46.000000000 -0400
14648 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
14649 __asm__ __volatile__("andl %%esp,%0" :
14650 "=r" (sp) : "0" (THREAD_SIZE - 1));
14651
14652 - return sp < (sizeof(struct thread_info) + STACK_WARN);
14653 + return sp < STACK_WARN;
14654 }
14655
14656 static void print_stack_overflow(void)
14657 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(
14658 * per-CPU IRQ handling contexts (thread information and stack)
14659 */
14660 union irq_ctx {
14661 - struct thread_info tinfo;
14662 - u32 stack[THREAD_SIZE/sizeof(u32)];
14663 -} __attribute__((aligned(PAGE_SIZE)));
14664 + unsigned long previous_esp;
14665 + u32 stack[THREAD_SIZE/sizeof(u32)];
14666 +} __attribute__((aligned(THREAD_SIZE)));
14667
14668 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
14669 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
14670 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
14671 static inline int
14672 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
14673 {
14674 - union irq_ctx *curctx, *irqctx;
14675 + union irq_ctx *irqctx;
14676 u32 *isp, arg1, arg2;
14677
14678 - curctx = (union irq_ctx *) current_thread_info();
14679 irqctx = __get_cpu_var(hardirq_ctx);
14680
14681 /*
14682 @@ -90,21 +89,17 @@ execute_on_irq_stack(int overflow, struc
14683 * handler) we can't do that and just have to keep using the
14684 * current stack (which is the irq stack already after all)
14685 */
14686 - if (unlikely(curctx == irqctx))
14687 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14688 return 0;
14689
14690 /* build the stack frame on the IRQ stack */
14691 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14692 - irqctx->tinfo.task = curctx->tinfo.task;
14693 - irqctx->tinfo.previous_esp = current_stack_pointer;
14694 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14695 + irqctx->previous_esp = current_stack_pointer;
14696 + add_preempt_count(HARDIRQ_OFFSET);
14697
14698 - /*
14699 - * Copy the softirq bits in preempt_count so that the
14700 - * softirq checks work in the hardirq context.
14701 - */
14702 - irqctx->tinfo.preempt_count =
14703 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
14704 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
14705 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14706 + __set_fs(MAKE_MM_SEG(0));
14707 +#endif
14708
14709 if (unlikely(overflow))
14710 call_on_stack(print_stack_overflow, isp);
14711 @@ -116,6 +111,12 @@ execute_on_irq_stack(int overflow, struc
14712 : "0" (irq), "1" (desc), "2" (isp),
14713 "D" (desc->handle_irq)
14714 : "memory", "cc", "ecx");
14715 +
14716 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14717 + __set_fs(current_thread_info()->addr_limit);
14718 +#endif
14719 +
14720 + sub_preempt_count(HARDIRQ_OFFSET);
14721 return 1;
14722 }
14723
14724 @@ -124,28 +125,11 @@ execute_on_irq_stack(int overflow, struc
14725 */
14726 void __cpuinit irq_ctx_init(int cpu)
14727 {
14728 - union irq_ctx *irqctx;
14729 -
14730 if (per_cpu(hardirq_ctx, cpu))
14731 return;
14732
14733 - irqctx = &per_cpu(hardirq_stack, cpu);
14734 - irqctx->tinfo.task = NULL;
14735 - irqctx->tinfo.exec_domain = NULL;
14736 - irqctx->tinfo.cpu = cpu;
14737 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
14738 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14739 -
14740 - per_cpu(hardirq_ctx, cpu) = irqctx;
14741 -
14742 - irqctx = &per_cpu(softirq_stack, cpu);
14743 - irqctx->tinfo.task = NULL;
14744 - irqctx->tinfo.exec_domain = NULL;
14745 - irqctx->tinfo.cpu = cpu;
14746 - irqctx->tinfo.preempt_count = 0;
14747 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14748 -
14749 - per_cpu(softirq_ctx, cpu) = irqctx;
14750 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
14751 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
14752
14753 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
14754 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
14755 @@ -159,7 +143,6 @@ void irq_ctx_exit(int cpu)
14756 asmlinkage void do_softirq(void)
14757 {
14758 unsigned long flags;
14759 - struct thread_info *curctx;
14760 union irq_ctx *irqctx;
14761 u32 *isp;
14762
14763 @@ -169,15 +152,22 @@ asmlinkage void do_softirq(void)
14764 local_irq_save(flags);
14765
14766 if (local_softirq_pending()) {
14767 - curctx = current_thread_info();
14768 irqctx = __get_cpu_var(softirq_ctx);
14769 - irqctx->tinfo.task = curctx->task;
14770 - irqctx->tinfo.previous_esp = current_stack_pointer;
14771 + irqctx->previous_esp = current_stack_pointer;
14772
14773 /* build the stack frame on the softirq stack */
14774 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14775 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14776 +
14777 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14778 + __set_fs(MAKE_MM_SEG(0));
14779 +#endif
14780
14781 call_on_stack(__do_softirq, isp);
14782 +
14783 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14784 + __set_fs(current_thread_info()->addr_limit);
14785 +#endif
14786 +
14787 /*
14788 * Shouldnt happen, we returned above if in_interrupt():
14789 */
14790 diff -urNp linux-2.6.32.42/arch/x86/kernel/irq.c linux-2.6.32.42/arch/x86/kernel/irq.c
14791 --- linux-2.6.32.42/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
14792 +++ linux-2.6.32.42/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
14793 @@ -15,7 +15,7 @@
14794 #include <asm/mce.h>
14795 #include <asm/hw_irq.h>
14796
14797 -atomic_t irq_err_count;
14798 +atomic_unchecked_t irq_err_count;
14799
14800 /* Function pointer for generic interrupt vector handling */
14801 void (*generic_interrupt_extension)(void) = NULL;
14802 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct
14803 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
14804 seq_printf(p, " Machine check polls\n");
14805 #endif
14806 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
14807 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
14808 #if defined(CONFIG_X86_IO_APIC)
14809 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
14810 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
14811 #endif
14812 return 0;
14813 }
14814 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
14815
14816 u64 arch_irq_stat(void)
14817 {
14818 - u64 sum = atomic_read(&irq_err_count);
14819 + u64 sum = atomic_read_unchecked(&irq_err_count);
14820
14821 #ifdef CONFIG_X86_IO_APIC
14822 - sum += atomic_read(&irq_mis_count);
14823 + sum += atomic_read_unchecked(&irq_mis_count);
14824 #endif
14825 return sum;
14826 }
14827 diff -urNp linux-2.6.32.42/arch/x86/kernel/kgdb.c linux-2.6.32.42/arch/x86/kernel/kgdb.c
14828 --- linux-2.6.32.42/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
14829 +++ linux-2.6.32.42/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
14830 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
14831
14832 /* clear the trace bit */
14833 linux_regs->flags &= ~X86_EFLAGS_TF;
14834 - atomic_set(&kgdb_cpu_doing_single_step, -1);
14835 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
14836
14837 /* set the trace bit if we're stepping */
14838 if (remcomInBuffer[0] == 's') {
14839 linux_regs->flags |= X86_EFLAGS_TF;
14840 kgdb_single_step = 1;
14841 - atomic_set(&kgdb_cpu_doing_single_step,
14842 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
14843 raw_smp_processor_id());
14844 }
14845
14846 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
14847 break;
14848
14849 case DIE_DEBUG:
14850 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
14851 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
14852 raw_smp_processor_id()) {
14853 if (user_mode(regs))
14854 return single_step_cont(regs, args);
14855 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
14856 return instruction_pointer(regs);
14857 }
14858
14859 -struct kgdb_arch arch_kgdb_ops = {
14860 +const struct kgdb_arch arch_kgdb_ops = {
14861 /* Breakpoint instruction: */
14862 .gdb_bpt_instr = { 0xcc },
14863 .flags = KGDB_HW_BREAKPOINT,
14864 diff -urNp linux-2.6.32.42/arch/x86/kernel/kprobes.c linux-2.6.32.42/arch/x86/kernel/kprobes.c
14865 --- linux-2.6.32.42/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
14866 +++ linux-2.6.32.42/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
14867 @@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
14868 char op;
14869 s32 raddr;
14870 } __attribute__((packed)) * jop;
14871 - jop = (struct __arch_jmp_op *)from;
14872 +
14873 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
14874 +
14875 + pax_open_kernel();
14876 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
14877 jop->op = RELATIVEJUMP_INSTRUCTION;
14878 + pax_close_kernel();
14879 }
14880
14881 /*
14882 @@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
14883 kprobe_opcode_t opcode;
14884 kprobe_opcode_t *orig_opcodes = opcodes;
14885
14886 - if (search_exception_tables((unsigned long)opcodes))
14887 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
14888 return 0; /* Page fault may occur on this address. */
14889
14890 retry:
14891 @@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
14892 disp = (u8 *) p->addr + *((s32 *) insn) -
14893 (u8 *) p->ainsn.insn;
14894 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
14895 + pax_open_kernel();
14896 *(s32 *)insn = (s32) disp;
14897 + pax_close_kernel();
14898 }
14899 }
14900 #endif
14901 @@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
14902
14903 static void __kprobes arch_copy_kprobe(struct kprobe *p)
14904 {
14905 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
14906 + pax_open_kernel();
14907 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
14908 + pax_close_kernel();
14909
14910 fix_riprel(p);
14911
14912 - if (can_boost(p->addr))
14913 + if (can_boost(ktla_ktva(p->addr)))
14914 p->ainsn.boostable = 0;
14915 else
14916 p->ainsn.boostable = -1;
14917
14918 - p->opcode = *p->addr;
14919 + p->opcode = *(ktla_ktva(p->addr));
14920 }
14921
14922 int __kprobes arch_prepare_kprobe(struct kprobe *p)
14923 @@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
14924 if (p->opcode == BREAKPOINT_INSTRUCTION)
14925 regs->ip = (unsigned long)p->addr;
14926 else
14927 - regs->ip = (unsigned long)p->ainsn.insn;
14928 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14929 }
14930
14931 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
14932 @@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
14933 if (p->ainsn.boostable == 1 && !p->post_handler) {
14934 /* Boost up -- we can execute copied instructions directly */
14935 reset_current_kprobe();
14936 - regs->ip = (unsigned long)p->ainsn.insn;
14937 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14938 preempt_enable_no_resched();
14939 return;
14940 }
14941 @@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
14942 struct kprobe_ctlblk *kcb;
14943
14944 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
14945 - if (*addr != BREAKPOINT_INSTRUCTION) {
14946 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
14947 /*
14948 * The breakpoint instruction was removed right
14949 * after we hit it. Another cpu has removed
14950 @@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
14951 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
14952 {
14953 unsigned long *tos = stack_addr(regs);
14954 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
14955 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
14956 unsigned long orig_ip = (unsigned long)p->addr;
14957 kprobe_opcode_t *insn = p->ainsn.insn;
14958
14959 @@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
14960 struct die_args *args = data;
14961 int ret = NOTIFY_DONE;
14962
14963 - if (args->regs && user_mode_vm(args->regs))
14964 + if (args->regs && user_mode(args->regs))
14965 return ret;
14966
14967 switch (val) {
14968 diff -urNp linux-2.6.32.42/arch/x86/kernel/ldt.c linux-2.6.32.42/arch/x86/kernel/ldt.c
14969 --- linux-2.6.32.42/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
14970 +++ linux-2.6.32.42/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
14971 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
14972 if (reload) {
14973 #ifdef CONFIG_SMP
14974 preempt_disable();
14975 - load_LDT(pc);
14976 + load_LDT_nolock(pc);
14977 if (!cpumask_equal(mm_cpumask(current->mm),
14978 cpumask_of(smp_processor_id())))
14979 smp_call_function(flush_ldt, current->mm, 1);
14980 preempt_enable();
14981 #else
14982 - load_LDT(pc);
14983 + load_LDT_nolock(pc);
14984 #endif
14985 }
14986 if (oldsize) {
14987 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
14988 return err;
14989
14990 for (i = 0; i < old->size; i++)
14991 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
14992 + write_ldt_entry(new->ldt, i, old->ldt + i);
14993 return 0;
14994 }
14995
14996 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct
14997 retval = copy_ldt(&mm->context, &old_mm->context);
14998 mutex_unlock(&old_mm->context.lock);
14999 }
15000 +
15001 + if (tsk == current) {
15002 + mm->context.vdso = 0;
15003 +
15004 +#ifdef CONFIG_X86_32
15005 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15006 + mm->context.user_cs_base = 0UL;
15007 + mm->context.user_cs_limit = ~0UL;
15008 +
15009 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15010 + cpus_clear(mm->context.cpu_user_cs_mask);
15011 +#endif
15012 +
15013 +#endif
15014 +#endif
15015 +
15016 + }
15017 +
15018 return retval;
15019 }
15020
15021 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15022 }
15023 }
15024
15025 +#ifdef CONFIG_PAX_SEGMEXEC
15026 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15027 + error = -EINVAL;
15028 + goto out_unlock;
15029 + }
15030 +#endif
15031 +
15032 fill_ldt(&ldt, &ldt_info);
15033 if (oldmode)
15034 ldt.avl = 0;
15035 diff -urNp linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c
15036 --- linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15037 +++ linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15038 @@ -26,7 +26,7 @@
15039 #include <asm/system.h>
15040 #include <asm/cacheflush.h>
15041
15042 -static void set_idt(void *newidt, __u16 limit)
15043 +static void set_idt(struct desc_struct *newidt, __u16 limit)
15044 {
15045 struct desc_ptr curidt;
15046
15047 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15048 }
15049
15050
15051 -static void set_gdt(void *newgdt, __u16 limit)
15052 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15053 {
15054 struct desc_ptr curgdt;
15055
15056 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15057 }
15058
15059 control_page = page_address(image->control_code_page);
15060 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15061 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15062
15063 relocate_kernel_ptr = control_page;
15064 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15065 diff -urNp linux-2.6.32.42/arch/x86/kernel/microcode_amd.c linux-2.6.32.42/arch/x86/kernel/microcode_amd.c
15066 --- linux-2.6.32.42/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15067 +++ linux-2.6.32.42/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15068 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15069 uci->mc = NULL;
15070 }
15071
15072 -static struct microcode_ops microcode_amd_ops = {
15073 +static const struct microcode_ops microcode_amd_ops = {
15074 .request_microcode_user = request_microcode_user,
15075 .request_microcode_fw = request_microcode_fw,
15076 .collect_cpu_info = collect_cpu_info_amd,
15077 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15078 .microcode_fini_cpu = microcode_fini_cpu_amd,
15079 };
15080
15081 -struct microcode_ops * __init init_amd_microcode(void)
15082 +const struct microcode_ops * __init init_amd_microcode(void)
15083 {
15084 return &microcode_amd_ops;
15085 }
15086 diff -urNp linux-2.6.32.42/arch/x86/kernel/microcode_core.c linux-2.6.32.42/arch/x86/kernel/microcode_core.c
15087 --- linux-2.6.32.42/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15088 +++ linux-2.6.32.42/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15089 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15090
15091 #define MICROCODE_VERSION "2.00"
15092
15093 -static struct microcode_ops *microcode_ops;
15094 +static const struct microcode_ops *microcode_ops;
15095
15096 /*
15097 * Synchronization.
15098 diff -urNp linux-2.6.32.42/arch/x86/kernel/microcode_intel.c linux-2.6.32.42/arch/x86/kernel/microcode_intel.c
15099 --- linux-2.6.32.42/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15100 +++ linux-2.6.32.42/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15101 @@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15102
15103 static int get_ucode_user(void *to, const void *from, size_t n)
15104 {
15105 - return copy_from_user(to, from, n);
15106 + return copy_from_user(to, (__force const void __user *)from, n);
15107 }
15108
15109 static enum ucode_state
15110 request_microcode_user(int cpu, const void __user *buf, size_t size)
15111 {
15112 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15113 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15114 }
15115
15116 static void microcode_fini_cpu(int cpu)
15117 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15118 uci->mc = NULL;
15119 }
15120
15121 -static struct microcode_ops microcode_intel_ops = {
15122 +static const struct microcode_ops microcode_intel_ops = {
15123 .request_microcode_user = request_microcode_user,
15124 .request_microcode_fw = request_microcode_fw,
15125 .collect_cpu_info = collect_cpu_info,
15126 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15127 .microcode_fini_cpu = microcode_fini_cpu,
15128 };
15129
15130 -struct microcode_ops * __init init_intel_microcode(void)
15131 +const struct microcode_ops * __init init_intel_microcode(void)
15132 {
15133 return &microcode_intel_ops;
15134 }
15135 diff -urNp linux-2.6.32.42/arch/x86/kernel/module.c linux-2.6.32.42/arch/x86/kernel/module.c
15136 --- linux-2.6.32.42/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15137 +++ linux-2.6.32.42/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15138 @@ -34,7 +34,7 @@
15139 #define DEBUGP(fmt...)
15140 #endif
15141
15142 -void *module_alloc(unsigned long size)
15143 +static void *__module_alloc(unsigned long size, pgprot_t prot)
15144 {
15145 struct vm_struct *area;
15146
15147 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15148 if (!area)
15149 return NULL;
15150
15151 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15152 - PAGE_KERNEL_EXEC);
15153 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15154 +}
15155 +
15156 +void *module_alloc(unsigned long size)
15157 +{
15158 +
15159 +#ifdef CONFIG_PAX_KERNEXEC
15160 + return __module_alloc(size, PAGE_KERNEL);
15161 +#else
15162 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15163 +#endif
15164 +
15165 }
15166
15167 /* Free memory returned from module_alloc */
15168 @@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15169 vfree(module_region);
15170 }
15171
15172 +#ifdef CONFIG_PAX_KERNEXEC
15173 +#ifdef CONFIG_X86_32
15174 +void *module_alloc_exec(unsigned long size)
15175 +{
15176 + struct vm_struct *area;
15177 +
15178 + if (size == 0)
15179 + return NULL;
15180 +
15181 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15182 + return area ? area->addr : NULL;
15183 +}
15184 +EXPORT_SYMBOL(module_alloc_exec);
15185 +
15186 +void module_free_exec(struct module *mod, void *module_region)
15187 +{
15188 + vunmap(module_region);
15189 +}
15190 +EXPORT_SYMBOL(module_free_exec);
15191 +#else
15192 +void module_free_exec(struct module *mod, void *module_region)
15193 +{
15194 + module_free(mod, module_region);
15195 +}
15196 +EXPORT_SYMBOL(module_free_exec);
15197 +
15198 +void *module_alloc_exec(unsigned long size)
15199 +{
15200 + return __module_alloc(size, PAGE_KERNEL_RX);
15201 +}
15202 +EXPORT_SYMBOL(module_alloc_exec);
15203 +#endif
15204 +#endif
15205 +
15206 /* We don't need anything special. */
15207 int module_frob_arch_sections(Elf_Ehdr *hdr,
15208 Elf_Shdr *sechdrs,
15209 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15210 unsigned int i;
15211 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15212 Elf32_Sym *sym;
15213 - uint32_t *location;
15214 + uint32_t *plocation, location;
15215
15216 DEBUGP("Applying relocate section %u to %u\n", relsec,
15217 sechdrs[relsec].sh_info);
15218 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15219 /* This is where to make the change */
15220 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15221 - + rel[i].r_offset;
15222 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15223 + location = (uint32_t)plocation;
15224 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15225 + plocation = ktla_ktva((void *)plocation);
15226 /* This is the symbol it is referring to. Note that all
15227 undefined symbols have been resolved. */
15228 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15229 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15230 switch (ELF32_R_TYPE(rel[i].r_info)) {
15231 case R_386_32:
15232 /* We add the value into the location given */
15233 - *location += sym->st_value;
15234 + pax_open_kernel();
15235 + *plocation += sym->st_value;
15236 + pax_close_kernel();
15237 break;
15238 case R_386_PC32:
15239 /* Add the value, subtract its postition */
15240 - *location += sym->st_value - (uint32_t)location;
15241 + pax_open_kernel();
15242 + *plocation += sym->st_value - location;
15243 + pax_close_kernel();
15244 break;
15245 default:
15246 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15247 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15248 case R_X86_64_NONE:
15249 break;
15250 case R_X86_64_64:
15251 + pax_open_kernel();
15252 *(u64 *)loc = val;
15253 + pax_close_kernel();
15254 break;
15255 case R_X86_64_32:
15256 + pax_open_kernel();
15257 *(u32 *)loc = val;
15258 + pax_close_kernel();
15259 if (val != *(u32 *)loc)
15260 goto overflow;
15261 break;
15262 case R_X86_64_32S:
15263 + pax_open_kernel();
15264 *(s32 *)loc = val;
15265 + pax_close_kernel();
15266 if ((s64)val != *(s32 *)loc)
15267 goto overflow;
15268 break;
15269 case R_X86_64_PC32:
15270 val -= (u64)loc;
15271 + pax_open_kernel();
15272 *(u32 *)loc = val;
15273 + pax_close_kernel();
15274 +
15275 #if 0
15276 if ((s64)val != *(s32 *)loc)
15277 goto overflow;
15278 diff -urNp linux-2.6.32.42/arch/x86/kernel/paravirt.c linux-2.6.32.42/arch/x86/kernel/paravirt.c
15279 --- linux-2.6.32.42/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15280 +++ linux-2.6.32.42/arch/x86/kernel/paravirt.c 2011-05-16 21:46:57.000000000 -0400
15281 @@ -122,7 +122,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15282 * corresponding structure. */
15283 static void *get_call_destination(u8 type)
15284 {
15285 - struct paravirt_patch_template tmpl = {
15286 + const struct paravirt_patch_template tmpl = {
15287 .pv_init_ops = pv_init_ops,
15288 .pv_time_ops = pv_time_ops,
15289 .pv_cpu_ops = pv_cpu_ops,
15290 @@ -133,6 +133,9 @@ static void *get_call_destination(u8 typ
15291 .pv_lock_ops = pv_lock_ops,
15292 #endif
15293 };
15294 +
15295 + pax_track_stack();
15296 +
15297 return *((void **)&tmpl + type);
15298 }
15299
15300 @@ -145,14 +148,14 @@ unsigned paravirt_patch_default(u8 type,
15301 if (opfunc == NULL)
15302 /* If there's no function, patch it with a ud2a (BUG) */
15303 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15304 - else if (opfunc == _paravirt_nop)
15305 + else if (opfunc == (void *)_paravirt_nop)
15306 /* If the operation is a nop, then nop the callsite */
15307 ret = paravirt_patch_nop();
15308
15309 /* identity functions just return their single argument */
15310 - else if (opfunc == _paravirt_ident_32)
15311 + else if (opfunc == (void *)_paravirt_ident_32)
15312 ret = paravirt_patch_ident_32(insnbuf, len);
15313 - else if (opfunc == _paravirt_ident_64)
15314 + else if (opfunc == (void *)_paravirt_ident_64)
15315 ret = paravirt_patch_ident_64(insnbuf, len);
15316
15317 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15318 @@ -178,7 +181,7 @@ unsigned paravirt_patch_insns(void *insn
15319 if (insn_len > len || start == NULL)
15320 insn_len = len;
15321 else
15322 - memcpy(insnbuf, start, insn_len);
15323 + memcpy(insnbuf, ktla_ktva(start), insn_len);
15324
15325 return insn_len;
15326 }
15327 @@ -294,22 +297,22 @@ void arch_flush_lazy_mmu_mode(void)
15328 preempt_enable();
15329 }
15330
15331 -struct pv_info pv_info = {
15332 +struct pv_info pv_info __read_only = {
15333 .name = "bare hardware",
15334 .paravirt_enabled = 0,
15335 .kernel_rpl = 0,
15336 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15337 };
15338
15339 -struct pv_init_ops pv_init_ops = {
15340 +struct pv_init_ops pv_init_ops __read_only = {
15341 .patch = native_patch,
15342 };
15343
15344 -struct pv_time_ops pv_time_ops = {
15345 +struct pv_time_ops pv_time_ops __read_only = {
15346 .sched_clock = native_sched_clock,
15347 };
15348
15349 -struct pv_irq_ops pv_irq_ops = {
15350 +struct pv_irq_ops pv_irq_ops __read_only = {
15351 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15352 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15353 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15354 @@ -321,7 +324,7 @@ struct pv_irq_ops pv_irq_ops = {
15355 #endif
15356 };
15357
15358 -struct pv_cpu_ops pv_cpu_ops = {
15359 +struct pv_cpu_ops pv_cpu_ops __read_only = {
15360 .cpuid = native_cpuid,
15361 .get_debugreg = native_get_debugreg,
15362 .set_debugreg = native_set_debugreg,
15363 @@ -382,7 +385,7 @@ struct pv_cpu_ops pv_cpu_ops = {
15364 .end_context_switch = paravirt_nop,
15365 };
15366
15367 -struct pv_apic_ops pv_apic_ops = {
15368 +struct pv_apic_ops pv_apic_ops __read_only = {
15369 #ifdef CONFIG_X86_LOCAL_APIC
15370 .startup_ipi_hook = paravirt_nop,
15371 #endif
15372 @@ -396,7 +399,7 @@ struct pv_apic_ops pv_apic_ops = {
15373 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15374 #endif
15375
15376 -struct pv_mmu_ops pv_mmu_ops = {
15377 +struct pv_mmu_ops pv_mmu_ops __read_only = {
15378
15379 .read_cr2 = native_read_cr2,
15380 .write_cr2 = native_write_cr2,
15381 @@ -467,6 +470,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15382 },
15383
15384 .set_fixmap = native_set_fixmap,
15385 +
15386 +#ifdef CONFIG_PAX_KERNEXEC
15387 + .pax_open_kernel = native_pax_open_kernel,
15388 + .pax_close_kernel = native_pax_close_kernel,
15389 +#endif
15390 +
15391 };
15392
15393 EXPORT_SYMBOL_GPL(pv_time_ops);
15394 diff -urNp linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c
15395 --- linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
15396 +++ linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
15397 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
15398 __raw_spin_lock(lock);
15399 }
15400
15401 -struct pv_lock_ops pv_lock_ops = {
15402 +struct pv_lock_ops pv_lock_ops __read_only = {
15403 #ifdef CONFIG_SMP
15404 .spin_is_locked = __ticket_spin_is_locked,
15405 .spin_is_contended = __ticket_spin_is_contended,
15406 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c
15407 --- linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
15408 +++ linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
15409 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
15410 free_pages((unsigned long)vaddr, get_order(size));
15411 }
15412
15413 -static struct dma_map_ops calgary_dma_ops = {
15414 +static const struct dma_map_ops calgary_dma_ops = {
15415 .alloc_coherent = calgary_alloc_coherent,
15416 .free_coherent = calgary_free_coherent,
15417 .map_sg = calgary_map_sg,
15418 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-dma.c linux-2.6.32.42/arch/x86/kernel/pci-dma.c
15419 --- linux-2.6.32.42/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
15420 +++ linux-2.6.32.42/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
15421 @@ -14,7 +14,7 @@
15422
15423 static int forbid_dac __read_mostly;
15424
15425 -struct dma_map_ops *dma_ops;
15426 +const struct dma_map_ops *dma_ops;
15427 EXPORT_SYMBOL(dma_ops);
15428
15429 static int iommu_sac_force __read_mostly;
15430 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
15431
15432 int dma_supported(struct device *dev, u64 mask)
15433 {
15434 - struct dma_map_ops *ops = get_dma_ops(dev);
15435 + const struct dma_map_ops *ops = get_dma_ops(dev);
15436
15437 #ifdef CONFIG_PCI
15438 if (mask > 0xffffffff && forbid_dac > 0) {
15439 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c
15440 --- linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
15441 +++ linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
15442 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
15443 return -1;
15444 }
15445
15446 -static struct dma_map_ops gart_dma_ops = {
15447 +static const struct dma_map_ops gart_dma_ops = {
15448 .map_sg = gart_map_sg,
15449 .unmap_sg = gart_unmap_sg,
15450 .map_page = gart_map_page,
15451 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-nommu.c linux-2.6.32.42/arch/x86/kernel/pci-nommu.c
15452 --- linux-2.6.32.42/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
15453 +++ linux-2.6.32.42/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
15454 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
15455 flush_write_buffers();
15456 }
15457
15458 -struct dma_map_ops nommu_dma_ops = {
15459 +const struct dma_map_ops nommu_dma_ops = {
15460 .alloc_coherent = dma_generic_alloc_coherent,
15461 .free_coherent = nommu_free_coherent,
15462 .map_sg = nommu_map_sg,
15463 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c
15464 --- linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
15465 +++ linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
15466 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
15467 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
15468 }
15469
15470 -static struct dma_map_ops swiotlb_dma_ops = {
15471 +static const struct dma_map_ops swiotlb_dma_ops = {
15472 .mapping_error = swiotlb_dma_mapping_error,
15473 .alloc_coherent = x86_swiotlb_alloc_coherent,
15474 .free_coherent = swiotlb_free_coherent,
15475 diff -urNp linux-2.6.32.42/arch/x86/kernel/process_32.c linux-2.6.32.42/arch/x86/kernel/process_32.c
15476 --- linux-2.6.32.42/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
15477 +++ linux-2.6.32.42/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
15478 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
15479 unsigned long thread_saved_pc(struct task_struct *tsk)
15480 {
15481 return ((unsigned long *)tsk->thread.sp)[3];
15482 +//XXX return tsk->thread.eip;
15483 }
15484
15485 #ifndef CONFIG_SMP
15486 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
15487 unsigned short ss, gs;
15488 const char *board;
15489
15490 - if (user_mode_vm(regs)) {
15491 + if (user_mode(regs)) {
15492 sp = regs->sp;
15493 ss = regs->ss & 0xffff;
15494 - gs = get_user_gs(regs);
15495 } else {
15496 sp = (unsigned long) (&regs->sp);
15497 savesegment(ss, ss);
15498 - savesegment(gs, gs);
15499 }
15500 + gs = get_user_gs(regs);
15501
15502 printk("\n");
15503
15504 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
15505 regs.bx = (unsigned long) fn;
15506 regs.dx = (unsigned long) arg;
15507
15508 - regs.ds = __USER_DS;
15509 - regs.es = __USER_DS;
15510 + regs.ds = __KERNEL_DS;
15511 + regs.es = __KERNEL_DS;
15512 regs.fs = __KERNEL_PERCPU;
15513 - regs.gs = __KERNEL_STACK_CANARY;
15514 + savesegment(gs, regs.gs);
15515 regs.orig_ax = -1;
15516 regs.ip = (unsigned long) kernel_thread_helper;
15517 regs.cs = __KERNEL_CS | get_kernel_rpl();
15518 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
15519 struct task_struct *tsk;
15520 int err;
15521
15522 - childregs = task_pt_regs(p);
15523 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
15524 *childregs = *regs;
15525 childregs->ax = 0;
15526 childregs->sp = sp;
15527
15528 p->thread.sp = (unsigned long) childregs;
15529 p->thread.sp0 = (unsigned long) (childregs+1);
15530 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15531
15532 p->thread.ip = (unsigned long) ret_from_fork;
15533
15534 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
15535 struct thread_struct *prev = &prev_p->thread,
15536 *next = &next_p->thread;
15537 int cpu = smp_processor_id();
15538 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
15539 + struct tss_struct *tss = init_tss + cpu;
15540 bool preload_fpu;
15541
15542 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
15543 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
15544 */
15545 lazy_save_gs(prev->gs);
15546
15547 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15548 + __set_fs(task_thread_info(next_p)->addr_limit);
15549 +#endif
15550 +
15551 /*
15552 * Load the per-thread Thread-Local Storage descriptor.
15553 */
15554 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
15555 */
15556 arch_end_context_switch(next_p);
15557
15558 + percpu_write(current_task, next_p);
15559 + percpu_write(current_tinfo, &next_p->tinfo);
15560 +
15561 if (preload_fpu)
15562 __math_state_restore();
15563
15564 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
15565 if (prev->gs | next->gs)
15566 lazy_load_gs(next->gs);
15567
15568 - percpu_write(current_task, next_p);
15569 -
15570 return prev_p;
15571 }
15572
15573 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
15574 } while (count++ < 16);
15575 return 0;
15576 }
15577 -
15578 diff -urNp linux-2.6.32.42/arch/x86/kernel/process_64.c linux-2.6.32.42/arch/x86/kernel/process_64.c
15579 --- linux-2.6.32.42/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
15580 +++ linux-2.6.32.42/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
15581 @@ -91,7 +91,7 @@ static void __exit_idle(void)
15582 void exit_idle(void)
15583 {
15584 /* idle loop has pid 0 */
15585 - if (current->pid)
15586 + if (task_pid_nr(current))
15587 return;
15588 __exit_idle();
15589 }
15590 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
15591 if (!board)
15592 board = "";
15593 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
15594 - current->pid, current->comm, print_tainted(),
15595 + task_pid_nr(current), current->comm, print_tainted(),
15596 init_utsname()->release,
15597 (int)strcspn(init_utsname()->version, " "),
15598 init_utsname()->version, board);
15599 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
15600 struct pt_regs *childregs;
15601 struct task_struct *me = current;
15602
15603 - childregs = ((struct pt_regs *)
15604 - (THREAD_SIZE + task_stack_page(p))) - 1;
15605 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
15606 *childregs = *regs;
15607
15608 childregs->ax = 0;
15609 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
15610 p->thread.sp = (unsigned long) childregs;
15611 p->thread.sp0 = (unsigned long) (childregs+1);
15612 p->thread.usersp = me->thread.usersp;
15613 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15614
15615 set_tsk_thread_flag(p, TIF_FORK);
15616
15617 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
15618 struct thread_struct *prev = &prev_p->thread;
15619 struct thread_struct *next = &next_p->thread;
15620 int cpu = smp_processor_id();
15621 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
15622 + struct tss_struct *tss = init_tss + cpu;
15623 unsigned fsindex, gsindex;
15624 bool preload_fpu;
15625
15626 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
15627 prev->usersp = percpu_read(old_rsp);
15628 percpu_write(old_rsp, next->usersp);
15629 percpu_write(current_task, next_p);
15630 + percpu_write(current_tinfo, &next_p->tinfo);
15631
15632 - percpu_write(kernel_stack,
15633 - (unsigned long)task_stack_page(next_p) +
15634 - THREAD_SIZE - KERNEL_STACK_OFFSET);
15635 + percpu_write(kernel_stack, next->sp0);
15636
15637 /*
15638 * Now maybe reload the debug registers and handle I/O bitmaps
15639 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
15640 if (!p || p == current || p->state == TASK_RUNNING)
15641 return 0;
15642 stack = (unsigned long)task_stack_page(p);
15643 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15644 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15645 return 0;
15646 fp = *(u64 *)(p->thread.sp);
15647 do {
15648 - if (fp < (unsigned long)stack ||
15649 - fp >= (unsigned long)stack+THREAD_SIZE)
15650 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15651 return 0;
15652 ip = *(u64 *)(fp+8);
15653 if (!in_sched_functions(ip))
15654 diff -urNp linux-2.6.32.42/arch/x86/kernel/process.c linux-2.6.32.42/arch/x86/kernel/process.c
15655 --- linux-2.6.32.42/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
15656 +++ linux-2.6.32.42/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
15657 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
15658
15659 void free_thread_info(struct thread_info *ti)
15660 {
15661 - free_thread_xstate(ti->task);
15662 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
15663 }
15664
15665 +static struct kmem_cache *task_struct_cachep;
15666 +
15667 void arch_task_cache_init(void)
15668 {
15669 - task_xstate_cachep =
15670 - kmem_cache_create("task_xstate", xstate_size,
15671 + /* create a slab on which task_structs can be allocated */
15672 + task_struct_cachep =
15673 + kmem_cache_create("task_struct", sizeof(struct task_struct),
15674 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
15675 +
15676 + task_xstate_cachep =
15677 + kmem_cache_create("task_xstate", xstate_size,
15678 __alignof__(union thread_xstate),
15679 - SLAB_PANIC | SLAB_NOTRACK, NULL);
15680 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
15681 +}
15682 +
15683 +struct task_struct *alloc_task_struct(void)
15684 +{
15685 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
15686 +}
15687 +
15688 +void free_task_struct(struct task_struct *task)
15689 +{
15690 + free_thread_xstate(task);
15691 + kmem_cache_free(task_struct_cachep, task);
15692 }
15693
15694 /*
15695 @@ -73,7 +90,7 @@ void exit_thread(void)
15696 unsigned long *bp = t->io_bitmap_ptr;
15697
15698 if (bp) {
15699 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
15700 + struct tss_struct *tss = init_tss + get_cpu();
15701
15702 t->io_bitmap_ptr = NULL;
15703 clear_thread_flag(TIF_IO_BITMAP);
15704 @@ -93,6 +110,9 @@ void flush_thread(void)
15705
15706 clear_tsk_thread_flag(tsk, TIF_DEBUG);
15707
15708 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15709 + loadsegment(gs, 0);
15710 +#endif
15711 tsk->thread.debugreg0 = 0;
15712 tsk->thread.debugreg1 = 0;
15713 tsk->thread.debugreg2 = 0;
15714 @@ -307,7 +327,7 @@ void default_idle(void)
15715 EXPORT_SYMBOL(default_idle);
15716 #endif
15717
15718 -void stop_this_cpu(void *dummy)
15719 +__noreturn void stop_this_cpu(void *dummy)
15720 {
15721 local_irq_disable();
15722 /*
15723 @@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
15724 }
15725 early_param("idle", idle_setup);
15726
15727 -unsigned long arch_align_stack(unsigned long sp)
15728 +#ifdef CONFIG_PAX_RANDKSTACK
15729 +asmlinkage void pax_randomize_kstack(void)
15730 {
15731 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
15732 - sp -= get_random_int() % 8192;
15733 - return sp & ~0xf;
15734 -}
15735 + struct thread_struct *thread = &current->thread;
15736 + unsigned long time;
15737
15738 -unsigned long arch_randomize_brk(struct mm_struct *mm)
15739 -{
15740 - unsigned long range_end = mm->brk + 0x02000000;
15741 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
15742 + if (!randomize_va_space)
15743 + return;
15744 +
15745 + rdtscl(time);
15746 +
15747 + /* P4 seems to return a 0 LSB, ignore it */
15748 +#ifdef CONFIG_MPENTIUM4
15749 + time &= 0x3EUL;
15750 + time <<= 2;
15751 +#elif defined(CONFIG_X86_64)
15752 + time &= 0xFUL;
15753 + time <<= 4;
15754 +#else
15755 + time &= 0x1FUL;
15756 + time <<= 3;
15757 +#endif
15758 +
15759 + thread->sp0 ^= time;
15760 + load_sp0(init_tss + smp_processor_id(), thread);
15761 +
15762 +#ifdef CONFIG_X86_64
15763 + percpu_write(kernel_stack, thread->sp0);
15764 +#endif
15765 }
15766 +#endif
15767
15768 diff -urNp linux-2.6.32.42/arch/x86/kernel/ptrace.c linux-2.6.32.42/arch/x86/kernel/ptrace.c
15769 --- linux-2.6.32.42/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
15770 +++ linux-2.6.32.42/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
15771 @@ -925,7 +925,7 @@ static const struct user_regset_view use
15772 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
15773 {
15774 int ret;
15775 - unsigned long __user *datap = (unsigned long __user *)data;
15776 + unsigned long __user *datap = (__force unsigned long __user *)data;
15777
15778 switch (request) {
15779 /* read the word at location addr in the USER area. */
15780 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
15781 if (addr < 0)
15782 return -EIO;
15783 ret = do_get_thread_area(child, addr,
15784 - (struct user_desc __user *) data);
15785 + (__force struct user_desc __user *) data);
15786 break;
15787
15788 case PTRACE_SET_THREAD_AREA:
15789 if (addr < 0)
15790 return -EIO;
15791 ret = do_set_thread_area(child, addr,
15792 - (struct user_desc __user *) data, 0);
15793 + (__force struct user_desc __user *) data, 0);
15794 break;
15795 #endif
15796
15797 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
15798 #ifdef CONFIG_X86_PTRACE_BTS
15799 case PTRACE_BTS_CONFIG:
15800 ret = ptrace_bts_config
15801 - (child, data, (struct ptrace_bts_config __user *)addr);
15802 + (child, data, (__force struct ptrace_bts_config __user *)addr);
15803 break;
15804
15805 case PTRACE_BTS_STATUS:
15806 ret = ptrace_bts_status
15807 - (child, data, (struct ptrace_bts_config __user *)addr);
15808 + (child, data, (__force struct ptrace_bts_config __user *)addr);
15809 break;
15810
15811 case PTRACE_BTS_SIZE:
15812 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
15813
15814 case PTRACE_BTS_GET:
15815 ret = ptrace_bts_read_record
15816 - (child, data, (struct bts_struct __user *) addr);
15817 + (child, data, (__force struct bts_struct __user *) addr);
15818 break;
15819
15820 case PTRACE_BTS_CLEAR:
15821 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
15822
15823 case PTRACE_BTS_DRAIN:
15824 ret = ptrace_bts_drain
15825 - (child, data, (struct bts_struct __user *) addr);
15826 + (child, data, (__force struct bts_struct __user *) addr);
15827 break;
15828 #endif /* CONFIG_X86_PTRACE_BTS */
15829
15830 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
15831 info.si_code = si_code;
15832
15833 /* User-mode ip? */
15834 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
15835 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
15836
15837 /* Send us the fake SIGTRAP */
15838 force_sig_info(SIGTRAP, &info, tsk);
15839 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
15840 * We must return the syscall number to actually look up in the table.
15841 * This can be -1L to skip running any syscall at all.
15842 */
15843 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
15844 +long syscall_trace_enter(struct pt_regs *regs)
15845 {
15846 long ret = 0;
15847
15848 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
15849 return ret ?: regs->orig_ax;
15850 }
15851
15852 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
15853 +void syscall_trace_leave(struct pt_regs *regs)
15854 {
15855 if (unlikely(current->audit_context))
15856 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
15857 diff -urNp linux-2.6.32.42/arch/x86/kernel/reboot.c linux-2.6.32.42/arch/x86/kernel/reboot.c
15858 --- linux-2.6.32.42/arch/x86/kernel/reboot.c 2011-03-27 14:31:47.000000000 -0400
15859 +++ linux-2.6.32.42/arch/x86/kernel/reboot.c 2011-05-22 23:02:03.000000000 -0400
15860 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
15861 EXPORT_SYMBOL(pm_power_off);
15862
15863 static const struct desc_ptr no_idt = {};
15864 -static int reboot_mode;
15865 +static unsigned short reboot_mode;
15866 enum reboot_type reboot_type = BOOT_KBD;
15867 int reboot_force;
15868
15869 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
15870 controller to pulse the CPU reset line, which is more thorough, but
15871 doesn't work with at least one type of 486 motherboard. It is easy
15872 to stop this code working; hence the copious comments. */
15873 -static const unsigned long long
15874 -real_mode_gdt_entries [3] =
15875 +static struct desc_struct
15876 +real_mode_gdt_entries [3] __read_only =
15877 {
15878 - 0x0000000000000000ULL, /* Null descriptor */
15879 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
15880 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
15881 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
15882 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
15883 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
15884 };
15885
15886 static const struct desc_ptr
15887 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
15888 * specified by the code and length parameters.
15889 * We assume that length will aways be less that 100!
15890 */
15891 -void machine_real_restart(const unsigned char *code, int length)
15892 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
15893 {
15894 local_irq_disable();
15895
15896 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
15897 /* Remap the kernel at virtual address zero, as well as offset zero
15898 from the kernel segment. This assumes the kernel segment starts at
15899 virtual address PAGE_OFFSET. */
15900 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15901 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
15902 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15903 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
15904
15905 /*
15906 * Use `swapper_pg_dir' as our page directory.
15907 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
15908 boot)". This seems like a fairly standard thing that gets set by
15909 REBOOT.COM programs, and the previous reset routine did this
15910 too. */
15911 - *((unsigned short *)0x472) = reboot_mode;
15912 + *(unsigned short *)(__va(0x472)) = reboot_mode;
15913
15914 /* For the switch to real mode, copy some code to low memory. It has
15915 to be in the first 64k because it is running in 16-bit mode, and it
15916 has to have the same physical and virtual address, because it turns
15917 off paging. Copy it near the end of the first page, out of the way
15918 of BIOS variables. */
15919 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
15920 - real_mode_switch, sizeof (real_mode_switch));
15921 - memcpy((void *)(0x1000 - 100), code, length);
15922 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
15923 + memcpy(__va(0x1000 - 100), code, length);
15924
15925 /* Set up the IDT for real mode. */
15926 load_idt(&real_mode_idt);
15927 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
15928 __asm__ __volatile__ ("ljmp $0x0008,%0"
15929 :
15930 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
15931 + do { } while (1);
15932 }
15933 #ifdef CONFIG_APM_MODULE
15934 EXPORT_SYMBOL(machine_real_restart);
15935 @@ -536,7 +536,7 @@ void __attribute__((weak)) mach_reboot_f
15936 {
15937 }
15938
15939 -static void native_machine_emergency_restart(void)
15940 +__noreturn static void native_machine_emergency_restart(void)
15941 {
15942 int i;
15943
15944 @@ -651,13 +651,13 @@ void native_machine_shutdown(void)
15945 #endif
15946 }
15947
15948 -static void __machine_emergency_restart(int emergency)
15949 +static __noreturn void __machine_emergency_restart(int emergency)
15950 {
15951 reboot_emergency = emergency;
15952 machine_ops.emergency_restart();
15953 }
15954
15955 -static void native_machine_restart(char *__unused)
15956 +static __noreturn void native_machine_restart(char *__unused)
15957 {
15958 printk("machine restart\n");
15959
15960 @@ -666,7 +666,7 @@ static void native_machine_restart(char
15961 __machine_emergency_restart(0);
15962 }
15963
15964 -static void native_machine_halt(void)
15965 +static __noreturn void native_machine_halt(void)
15966 {
15967 /* stop other cpus and apics */
15968 machine_shutdown();
15969 @@ -677,7 +677,7 @@ static void native_machine_halt(void)
15970 stop_this_cpu(NULL);
15971 }
15972
15973 -static void native_machine_power_off(void)
15974 +__noreturn static void native_machine_power_off(void)
15975 {
15976 if (pm_power_off) {
15977 if (!reboot_force)
15978 @@ -686,6 +686,7 @@ static void native_machine_power_off(voi
15979 }
15980 /* a fallback in case there is no PM info available */
15981 tboot_shutdown(TB_SHUTDOWN_HALT);
15982 + do { } while (1);
15983 }
15984
15985 struct machine_ops machine_ops = {
15986 diff -urNp linux-2.6.32.42/arch/x86/kernel/setup.c linux-2.6.32.42/arch/x86/kernel/setup.c
15987 --- linux-2.6.32.42/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
15988 +++ linux-2.6.32.42/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
15989 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
15990
15991 if (!boot_params.hdr.root_flags)
15992 root_mountflags &= ~MS_RDONLY;
15993 - init_mm.start_code = (unsigned long) _text;
15994 - init_mm.end_code = (unsigned long) _etext;
15995 + init_mm.start_code = ktla_ktva((unsigned long) _text);
15996 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
15997 init_mm.end_data = (unsigned long) _edata;
15998 init_mm.brk = _brk_end;
15999
16000 - code_resource.start = virt_to_phys(_text);
16001 - code_resource.end = virt_to_phys(_etext)-1;
16002 - data_resource.start = virt_to_phys(_etext);
16003 + code_resource.start = virt_to_phys(ktla_ktva(_text));
16004 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16005 + data_resource.start = virt_to_phys(_sdata);
16006 data_resource.end = virt_to_phys(_edata)-1;
16007 bss_resource.start = virt_to_phys(&__bss_start);
16008 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16009 diff -urNp linux-2.6.32.42/arch/x86/kernel/setup_percpu.c linux-2.6.32.42/arch/x86/kernel/setup_percpu.c
16010 --- linux-2.6.32.42/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16011 +++ linux-2.6.32.42/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16012 @@ -25,19 +25,17 @@
16013 # define DBG(x...)
16014 #endif
16015
16016 -DEFINE_PER_CPU(int, cpu_number);
16017 +#ifdef CONFIG_SMP
16018 +DEFINE_PER_CPU(unsigned int, cpu_number);
16019 EXPORT_PER_CPU_SYMBOL(cpu_number);
16020 +#endif
16021
16022 -#ifdef CONFIG_X86_64
16023 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16024 -#else
16025 -#define BOOT_PERCPU_OFFSET 0
16026 -#endif
16027
16028 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16029 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16030
16031 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16032 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16033 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16034 };
16035 EXPORT_SYMBOL(__per_cpu_offset);
16036 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16037 {
16038 #ifdef CONFIG_X86_32
16039 struct desc_struct gdt;
16040 + unsigned long base = per_cpu_offset(cpu);
16041
16042 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16043 - 0x2 | DESCTYPE_S, 0x8);
16044 - gdt.s = 1;
16045 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16046 + 0x83 | DESCTYPE_S, 0xC);
16047 write_gdt_entry(get_cpu_gdt_table(cpu),
16048 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16049 #endif
16050 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16051 /* alrighty, percpu areas up and running */
16052 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16053 for_each_possible_cpu(cpu) {
16054 +#ifdef CONFIG_CC_STACKPROTECTOR
16055 +#ifdef CONFIG_X86_32
16056 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
16057 +#endif
16058 +#endif
16059 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16060 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16061 per_cpu(cpu_number, cpu) = cpu;
16062 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16063 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16064 #endif
16065 #endif
16066 +#ifdef CONFIG_CC_STACKPROTECTOR
16067 +#ifdef CONFIG_X86_32
16068 + if (!cpu)
16069 + per_cpu(stack_canary.canary, cpu) = canary;
16070 +#endif
16071 +#endif
16072 /*
16073 * Up to this point, the boot CPU has been using .data.init
16074 * area. Reload any changed state for the boot CPU.
16075 diff -urNp linux-2.6.32.42/arch/x86/kernel/signal.c linux-2.6.32.42/arch/x86/kernel/signal.c
16076 --- linux-2.6.32.42/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16077 +++ linux-2.6.32.42/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16078 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16079 * Align the stack pointer according to the i386 ABI,
16080 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16081 */
16082 - sp = ((sp + 4) & -16ul) - 4;
16083 + sp = ((sp - 12) & -16ul) - 4;
16084 #else /* !CONFIG_X86_32 */
16085 sp = round_down(sp, 16) - 8;
16086 #endif
16087 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16088 * Return an always-bogus address instead so we will die with SIGSEGV.
16089 */
16090 if (onsigstack && !likely(on_sig_stack(sp)))
16091 - return (void __user *)-1L;
16092 + return (__force void __user *)-1L;
16093
16094 /* save i387 state */
16095 if (used_math() && save_i387_xstate(*fpstate) < 0)
16096 - return (void __user *)-1L;
16097 + return (__force void __user *)-1L;
16098
16099 return (void __user *)sp;
16100 }
16101 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16102 }
16103
16104 if (current->mm->context.vdso)
16105 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16106 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16107 else
16108 - restorer = &frame->retcode;
16109 + restorer = (void __user *)&frame->retcode;
16110 if (ka->sa.sa_flags & SA_RESTORER)
16111 restorer = ka->sa.sa_restorer;
16112
16113 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16114 * reasons and because gdb uses it as a signature to notice
16115 * signal handler stack frames.
16116 */
16117 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16118 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16119
16120 if (err)
16121 return -EFAULT;
16122 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16123 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16124
16125 /* Set up to return from userspace. */
16126 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16127 + if (current->mm->context.vdso)
16128 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16129 + else
16130 + restorer = (void __user *)&frame->retcode;
16131 if (ka->sa.sa_flags & SA_RESTORER)
16132 restorer = ka->sa.sa_restorer;
16133 put_user_ex(restorer, &frame->pretcode);
16134 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16135 * reasons and because gdb uses it as a signature to notice
16136 * signal handler stack frames.
16137 */
16138 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16139 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16140 } put_user_catch(err);
16141
16142 if (err)
16143 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16144 int signr;
16145 sigset_t *oldset;
16146
16147 + pax_track_stack();
16148 +
16149 /*
16150 * We want the common case to go fast, which is why we may in certain
16151 * cases get here from kernel mode. Just return without doing anything
16152 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16153 * X86_32: vm86 regs switched out by assembly code before reaching
16154 * here, so testing against kernel CS suffices.
16155 */
16156 - if (!user_mode(regs))
16157 + if (!user_mode_novm(regs))
16158 return;
16159
16160 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
16161 diff -urNp linux-2.6.32.42/arch/x86/kernel/smpboot.c linux-2.6.32.42/arch/x86/kernel/smpboot.c
16162 --- linux-2.6.32.42/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16163 +++ linux-2.6.32.42/arch/x86/kernel/smpboot.c 2011-05-11 18:25:15.000000000 -0400
16164 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16165 */
16166 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16167
16168 -void cpu_hotplug_driver_lock()
16169 +void cpu_hotplug_driver_lock(void)
16170 {
16171 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
16172 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
16173 }
16174
16175 -void cpu_hotplug_driver_unlock()
16176 +void cpu_hotplug_driver_unlock(void)
16177 {
16178 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16179 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16180 }
16181
16182 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16183 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16184 set_idle_for_cpu(cpu, c_idle.idle);
16185 do_rest:
16186 per_cpu(current_task, cpu) = c_idle.idle;
16187 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16188 #ifdef CONFIG_X86_32
16189 /* Stack for startup_32 can be just as for start_secondary onwards */
16190 irq_ctx_init(cpu);
16191 @@ -750,11 +751,13 @@ do_rest:
16192 #else
16193 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16194 initial_gs = per_cpu_offset(cpu);
16195 - per_cpu(kernel_stack, cpu) =
16196 - (unsigned long)task_stack_page(c_idle.idle) -
16197 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16198 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16199 #endif
16200 +
16201 + pax_open_kernel();
16202 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16203 + pax_close_kernel();
16204 +
16205 initial_code = (unsigned long)start_secondary;
16206 stack_start.sp = (void *) c_idle.idle->thread.sp;
16207
16208 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16209
16210 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16211
16212 +#ifdef CONFIG_PAX_PER_CPU_PGD
16213 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16214 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16215 + KERNEL_PGD_PTRS);
16216 +#endif
16217 +
16218 err = do_boot_cpu(apicid, cpu);
16219
16220 if (err) {
16221 diff -urNp linux-2.6.32.42/arch/x86/kernel/step.c linux-2.6.32.42/arch/x86/kernel/step.c
16222 --- linux-2.6.32.42/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16223 +++ linux-2.6.32.42/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16224 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16225 struct desc_struct *desc;
16226 unsigned long base;
16227
16228 - seg &= ~7UL;
16229 + seg >>= 3;
16230
16231 mutex_lock(&child->mm->context.lock);
16232 - if (unlikely((seg >> 3) >= child->mm->context.size))
16233 + if (unlikely(seg >= child->mm->context.size))
16234 addr = -1L; /* bogus selector, access would fault */
16235 else {
16236 desc = child->mm->context.ldt + seg;
16237 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16238 addr += base;
16239 }
16240 mutex_unlock(&child->mm->context.lock);
16241 - }
16242 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16243 + addr = ktla_ktva(addr);
16244
16245 return addr;
16246 }
16247 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16248 unsigned char opcode[15];
16249 unsigned long addr = convert_ip_to_linear(child, regs);
16250
16251 + if (addr == -EINVAL)
16252 + return 0;
16253 +
16254 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16255 for (i = 0; i < copied; i++) {
16256 switch (opcode[i]) {
16257 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16258
16259 #ifdef CONFIG_X86_64
16260 case 0x40 ... 0x4f:
16261 - if (regs->cs != __USER_CS)
16262 + if ((regs->cs & 0xffff) != __USER_CS)
16263 /* 32-bit mode: register increment */
16264 return 0;
16265 /* 64-bit mode: REX prefix */
16266 diff -urNp linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S
16267 --- linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16268 +++ linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16269 @@ -1,3 +1,4 @@
16270 +.section .rodata,"a",@progbits
16271 ENTRY(sys_call_table)
16272 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16273 .long sys_exit
16274 diff -urNp linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c
16275 --- linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16276 +++ linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16277 @@ -24,6 +24,21 @@
16278
16279 #include <asm/syscalls.h>
16280
16281 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16282 +{
16283 + unsigned long pax_task_size = TASK_SIZE;
16284 +
16285 +#ifdef CONFIG_PAX_SEGMEXEC
16286 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16287 + pax_task_size = SEGMEXEC_TASK_SIZE;
16288 +#endif
16289 +
16290 + if (len > pax_task_size || addr > pax_task_size - len)
16291 + return -EINVAL;
16292 +
16293 + return 0;
16294 +}
16295 +
16296 /*
16297 * Perform the select(nd, in, out, ex, tv) and mmap() system
16298 * calls. Linux/i386 didn't use to be able to handle more than
16299 @@ -58,6 +73,212 @@ out:
16300 return err;
16301 }
16302
16303 +unsigned long
16304 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
16305 + unsigned long len, unsigned long pgoff, unsigned long flags)
16306 +{
16307 + struct mm_struct *mm = current->mm;
16308 + struct vm_area_struct *vma;
16309 + unsigned long start_addr, pax_task_size = TASK_SIZE;
16310 +
16311 +#ifdef CONFIG_PAX_SEGMEXEC
16312 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16313 + pax_task_size = SEGMEXEC_TASK_SIZE;
16314 +#endif
16315 +
16316 + pax_task_size -= PAGE_SIZE;
16317 +
16318 + if (len > pax_task_size)
16319 + return -ENOMEM;
16320 +
16321 + if (flags & MAP_FIXED)
16322 + return addr;
16323 +
16324 +#ifdef CONFIG_PAX_RANDMMAP
16325 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16326 +#endif
16327 +
16328 + if (addr) {
16329 + addr = PAGE_ALIGN(addr);
16330 + if (pax_task_size - len >= addr) {
16331 + vma = find_vma(mm, addr);
16332 + if (check_heap_stack_gap(vma, addr, len))
16333 + return addr;
16334 + }
16335 + }
16336 + if (len > mm->cached_hole_size) {
16337 + start_addr = addr = mm->free_area_cache;
16338 + } else {
16339 + start_addr = addr = mm->mmap_base;
16340 + mm->cached_hole_size = 0;
16341 + }
16342 +
16343 +#ifdef CONFIG_PAX_PAGEEXEC
16344 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16345 + start_addr = 0x00110000UL;
16346 +
16347 +#ifdef CONFIG_PAX_RANDMMAP
16348 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16349 + start_addr += mm->delta_mmap & 0x03FFF000UL;
16350 +#endif
16351 +
16352 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16353 + start_addr = addr = mm->mmap_base;
16354 + else
16355 + addr = start_addr;
16356 + }
16357 +#endif
16358 +
16359 +full_search:
16360 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16361 + /* At this point: (!vma || addr < vma->vm_end). */
16362 + if (pax_task_size - len < addr) {
16363 + /*
16364 + * Start a new search - just in case we missed
16365 + * some holes.
16366 + */
16367 + if (start_addr != mm->mmap_base) {
16368 + start_addr = addr = mm->mmap_base;
16369 + mm->cached_hole_size = 0;
16370 + goto full_search;
16371 + }
16372 + return -ENOMEM;
16373 + }
16374 + if (check_heap_stack_gap(vma, addr, len))
16375 + break;
16376 + if (addr + mm->cached_hole_size < vma->vm_start)
16377 + mm->cached_hole_size = vma->vm_start - addr;
16378 + addr = vma->vm_end;
16379 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
16380 + start_addr = addr = mm->mmap_base;
16381 + mm->cached_hole_size = 0;
16382 + goto full_search;
16383 + }
16384 + }
16385 +
16386 + /*
16387 + * Remember the place where we stopped the search:
16388 + */
16389 + mm->free_area_cache = addr + len;
16390 + return addr;
16391 +}
16392 +
16393 +unsigned long
16394 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16395 + const unsigned long len, const unsigned long pgoff,
16396 + const unsigned long flags)
16397 +{
16398 + struct vm_area_struct *vma;
16399 + struct mm_struct *mm = current->mm;
16400 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16401 +
16402 +#ifdef CONFIG_PAX_SEGMEXEC
16403 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16404 + pax_task_size = SEGMEXEC_TASK_SIZE;
16405 +#endif
16406 +
16407 + pax_task_size -= PAGE_SIZE;
16408 +
16409 + /* requested length too big for entire address space */
16410 + if (len > pax_task_size)
16411 + return -ENOMEM;
16412 +
16413 + if (flags & MAP_FIXED)
16414 + return addr;
16415 +
16416 +#ifdef CONFIG_PAX_PAGEEXEC
16417 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16418 + goto bottomup;
16419 +#endif
16420 +
16421 +#ifdef CONFIG_PAX_RANDMMAP
16422 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16423 +#endif
16424 +
16425 + /* requesting a specific address */
16426 + if (addr) {
16427 + addr = PAGE_ALIGN(addr);
16428 + if (pax_task_size - len >= addr) {
16429 + vma = find_vma(mm, addr);
16430 + if (check_heap_stack_gap(vma, addr, len))
16431 + return addr;
16432 + }
16433 + }
16434 +
16435 + /* check if free_area_cache is useful for us */
16436 + if (len <= mm->cached_hole_size) {
16437 + mm->cached_hole_size = 0;
16438 + mm->free_area_cache = mm->mmap_base;
16439 + }
16440 +
16441 + /* either no address requested or can't fit in requested address hole */
16442 + addr = mm->free_area_cache;
16443 +
16444 + /* make sure it can fit in the remaining address space */
16445 + if (addr > len) {
16446 + vma = find_vma(mm, addr-len);
16447 + if (check_heap_stack_gap(vma, addr - len, len))
16448 + /* remember the address as a hint for next time */
16449 + return (mm->free_area_cache = addr-len);
16450 + }
16451 +
16452 + if (mm->mmap_base < len)
16453 + goto bottomup;
16454 +
16455 + addr = mm->mmap_base-len;
16456 +
16457 + do {
16458 + /*
16459 + * Lookup failure means no vma is above this address,
16460 + * else if new region fits below vma->vm_start,
16461 + * return with success:
16462 + */
16463 + vma = find_vma(mm, addr);
16464 + if (check_heap_stack_gap(vma, addr, len))
16465 + /* remember the address as a hint for next time */
16466 + return (mm->free_area_cache = addr);
16467 +
16468 + /* remember the largest hole we saw so far */
16469 + if (addr + mm->cached_hole_size < vma->vm_start)
16470 + mm->cached_hole_size = vma->vm_start - addr;
16471 +
16472 + /* try just below the current vma->vm_start */
16473 + addr = skip_heap_stack_gap(vma, len);
16474 + } while (!IS_ERR_VALUE(addr));
16475 +
16476 +bottomup:
16477 + /*
16478 + * A failed mmap() very likely causes application failure,
16479 + * so fall back to the bottom-up function here. This scenario
16480 + * can happen with large stack limits and large mmap()
16481 + * allocations.
16482 + */
16483 +
16484 +#ifdef CONFIG_PAX_SEGMEXEC
16485 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16486 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
16487 + else
16488 +#endif
16489 +
16490 + mm->mmap_base = TASK_UNMAPPED_BASE;
16491 +
16492 +#ifdef CONFIG_PAX_RANDMMAP
16493 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16494 + mm->mmap_base += mm->delta_mmap;
16495 +#endif
16496 +
16497 + mm->free_area_cache = mm->mmap_base;
16498 + mm->cached_hole_size = ~0UL;
16499 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16500 + /*
16501 + * Restore the topdown base:
16502 + */
16503 + mm->mmap_base = base;
16504 + mm->free_area_cache = base;
16505 + mm->cached_hole_size = ~0UL;
16506 +
16507 + return addr;
16508 +}
16509
16510 struct sel_arg_struct {
16511 unsigned long n;
16512 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
16513 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
16514 case SEMTIMEDOP:
16515 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
16516 - (const struct timespec __user *)fifth);
16517 + (__force const struct timespec __user *)fifth);
16518
16519 case SEMGET:
16520 return sys_semget(first, second, third);
16521 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
16522 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
16523 if (ret)
16524 return ret;
16525 - return put_user(raddr, (ulong __user *) third);
16526 + return put_user(raddr, (__force ulong __user *) third);
16527 }
16528 case 1: /* iBCS2 emulator entry point */
16529 if (!segment_eq(get_fs(), get_ds()))
16530 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
16531
16532 return error;
16533 }
16534 -
16535 -
16536 -/*
16537 - * Do a system call from kernel instead of calling sys_execve so we
16538 - * end up with proper pt_regs.
16539 - */
16540 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
16541 -{
16542 - long __res;
16543 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
16544 - : "=a" (__res)
16545 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
16546 - return __res;
16547 -}
16548 diff -urNp linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c
16549 --- linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
16550 +++ linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
16551 @@ -32,8 +32,8 @@ out:
16552 return error;
16553 }
16554
16555 -static void find_start_end(unsigned long flags, unsigned long *begin,
16556 - unsigned long *end)
16557 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
16558 + unsigned long *begin, unsigned long *end)
16559 {
16560 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
16561 unsigned long new_begin;
16562 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
16563 *begin = new_begin;
16564 }
16565 } else {
16566 - *begin = TASK_UNMAPPED_BASE;
16567 + *begin = mm->mmap_base;
16568 *end = TASK_SIZE;
16569 }
16570 }
16571 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
16572 if (flags & MAP_FIXED)
16573 return addr;
16574
16575 - find_start_end(flags, &begin, &end);
16576 + find_start_end(mm, flags, &begin, &end);
16577
16578 if (len > end)
16579 return -ENOMEM;
16580
16581 +#ifdef CONFIG_PAX_RANDMMAP
16582 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16583 +#endif
16584 +
16585 if (addr) {
16586 addr = PAGE_ALIGN(addr);
16587 vma = find_vma(mm, addr);
16588 - if (end - len >= addr &&
16589 - (!vma || addr + len <= vma->vm_start))
16590 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
16591 return addr;
16592 }
16593 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
16594 @@ -106,7 +109,7 @@ full_search:
16595 }
16596 return -ENOMEM;
16597 }
16598 - if (!vma || addr + len <= vma->vm_start) {
16599 + if (check_heap_stack_gap(vma, addr, len)) {
16600 /*
16601 * Remember the place where we stopped the search:
16602 */
16603 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
16604 {
16605 struct vm_area_struct *vma;
16606 struct mm_struct *mm = current->mm;
16607 - unsigned long addr = addr0;
16608 + unsigned long base = mm->mmap_base, addr = addr0;
16609
16610 /* requested length too big for entire address space */
16611 if (len > TASK_SIZE)
16612 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
16613 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
16614 goto bottomup;
16615
16616 +#ifdef CONFIG_PAX_RANDMMAP
16617 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16618 +#endif
16619 +
16620 /* requesting a specific address */
16621 if (addr) {
16622 addr = PAGE_ALIGN(addr);
16623 - vma = find_vma(mm, addr);
16624 - if (TASK_SIZE - len >= addr &&
16625 - (!vma || addr + len <= vma->vm_start))
16626 - return addr;
16627 + if (TASK_SIZE - len >= addr) {
16628 + vma = find_vma(mm, addr);
16629 + if (check_heap_stack_gap(vma, addr, len))
16630 + return addr;
16631 + }
16632 }
16633
16634 /* check if free_area_cache is useful for us */
16635 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
16636 /* make sure it can fit in the remaining address space */
16637 if (addr > len) {
16638 vma = find_vma(mm, addr-len);
16639 - if (!vma || addr <= vma->vm_start)
16640 + if (check_heap_stack_gap(vma, addr - len, len))
16641 /* remember the address as a hint for next time */
16642 return mm->free_area_cache = addr-len;
16643 }
16644 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
16645 * return with success:
16646 */
16647 vma = find_vma(mm, addr);
16648 - if (!vma || addr+len <= vma->vm_start)
16649 + if (check_heap_stack_gap(vma, addr, len))
16650 /* remember the address as a hint for next time */
16651 return mm->free_area_cache = addr;
16652
16653 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
16654 mm->cached_hole_size = vma->vm_start - addr;
16655
16656 /* try just below the current vma->vm_start */
16657 - addr = vma->vm_start-len;
16658 - } while (len < vma->vm_start);
16659 + addr = skip_heap_stack_gap(vma, len);
16660 + } while (!IS_ERR_VALUE(addr));
16661
16662 bottomup:
16663 /*
16664 @@ -198,13 +206,21 @@ bottomup:
16665 * can happen with large stack limits and large mmap()
16666 * allocations.
16667 */
16668 + mm->mmap_base = TASK_UNMAPPED_BASE;
16669 +
16670 +#ifdef CONFIG_PAX_RANDMMAP
16671 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16672 + mm->mmap_base += mm->delta_mmap;
16673 +#endif
16674 +
16675 + mm->free_area_cache = mm->mmap_base;
16676 mm->cached_hole_size = ~0UL;
16677 - mm->free_area_cache = TASK_UNMAPPED_BASE;
16678 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16679 /*
16680 * Restore the topdown base:
16681 */
16682 - mm->free_area_cache = mm->mmap_base;
16683 + mm->mmap_base = base;
16684 + mm->free_area_cache = base;
16685 mm->cached_hole_size = ~0UL;
16686
16687 return addr;
16688 diff -urNp linux-2.6.32.42/arch/x86/kernel/tboot.c linux-2.6.32.42/arch/x86/kernel/tboot.c
16689 --- linux-2.6.32.42/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
16690 +++ linux-2.6.32.42/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
16691 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
16692
16693 void tboot_shutdown(u32 shutdown_type)
16694 {
16695 - void (*shutdown)(void);
16696 + void (* __noreturn shutdown)(void);
16697
16698 if (!tboot_enabled())
16699 return;
16700 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
16701
16702 switch_to_tboot_pt();
16703
16704 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
16705 + shutdown = (void *)tboot->shutdown_entry;
16706 shutdown();
16707
16708 /* should not reach here */
16709 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
16710 tboot_shutdown(acpi_shutdown_map[sleep_state]);
16711 }
16712
16713 -static atomic_t ap_wfs_count;
16714 +static atomic_unchecked_t ap_wfs_count;
16715
16716 static int tboot_wait_for_aps(int num_aps)
16717 {
16718 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
16719 {
16720 switch (action) {
16721 case CPU_DYING:
16722 - atomic_inc(&ap_wfs_count);
16723 + atomic_inc_unchecked(&ap_wfs_count);
16724 if (num_online_cpus() == 1)
16725 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
16726 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
16727 return NOTIFY_BAD;
16728 break;
16729 }
16730 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
16731
16732 tboot_create_trampoline();
16733
16734 - atomic_set(&ap_wfs_count, 0);
16735 + atomic_set_unchecked(&ap_wfs_count, 0);
16736 register_hotcpu_notifier(&tboot_cpu_notifier);
16737 return 0;
16738 }
16739 diff -urNp linux-2.6.32.42/arch/x86/kernel/time.c linux-2.6.32.42/arch/x86/kernel/time.c
16740 --- linux-2.6.32.42/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
16741 +++ linux-2.6.32.42/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
16742 @@ -26,17 +26,13 @@
16743 int timer_ack;
16744 #endif
16745
16746 -#ifdef CONFIG_X86_64
16747 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
16748 -#endif
16749 -
16750 unsigned long profile_pc(struct pt_regs *regs)
16751 {
16752 unsigned long pc = instruction_pointer(regs);
16753
16754 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
16755 + if (!user_mode(regs) && in_lock_functions(pc)) {
16756 #ifdef CONFIG_FRAME_POINTER
16757 - return *(unsigned long *)(regs->bp + sizeof(long));
16758 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
16759 #else
16760 unsigned long *sp =
16761 (unsigned long *)kernel_stack_pointer(regs);
16762 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
16763 * or above a saved flags. Eflags has bits 22-31 zero,
16764 * kernel addresses don't.
16765 */
16766 +
16767 +#ifdef CONFIG_PAX_KERNEXEC
16768 + return ktla_ktva(sp[0]);
16769 +#else
16770 if (sp[0] >> 22)
16771 return sp[0];
16772 if (sp[1] >> 22)
16773 return sp[1];
16774 #endif
16775 +
16776 +#endif
16777 }
16778 return pc;
16779 }
16780 diff -urNp linux-2.6.32.42/arch/x86/kernel/tls.c linux-2.6.32.42/arch/x86/kernel/tls.c
16781 --- linux-2.6.32.42/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
16782 +++ linux-2.6.32.42/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
16783 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
16784 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
16785 return -EINVAL;
16786
16787 +#ifdef CONFIG_PAX_SEGMEXEC
16788 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
16789 + return -EINVAL;
16790 +#endif
16791 +
16792 set_tls_desc(p, idx, &info, 1);
16793
16794 return 0;
16795 diff -urNp linux-2.6.32.42/arch/x86/kernel/trampoline_32.S linux-2.6.32.42/arch/x86/kernel/trampoline_32.S
16796 --- linux-2.6.32.42/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
16797 +++ linux-2.6.32.42/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
16798 @@ -32,6 +32,12 @@
16799 #include <asm/segment.h>
16800 #include <asm/page_types.h>
16801
16802 +#ifdef CONFIG_PAX_KERNEXEC
16803 +#define ta(X) (X)
16804 +#else
16805 +#define ta(X) ((X) - __PAGE_OFFSET)
16806 +#endif
16807 +
16808 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
16809 __CPUINITRODATA
16810 .code16
16811 @@ -60,7 +66,7 @@ r_base = .
16812 inc %ax # protected mode (PE) bit
16813 lmsw %ax # into protected mode
16814 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
16815 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
16816 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
16817
16818 # These need to be in the same 64K segment as the above;
16819 # hence we don't use the boot_gdt_descr defined in head.S
16820 diff -urNp linux-2.6.32.42/arch/x86/kernel/trampoline_64.S linux-2.6.32.42/arch/x86/kernel/trampoline_64.S
16821 --- linux-2.6.32.42/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
16822 +++ linux-2.6.32.42/arch/x86/kernel/trampoline_64.S 2011-04-17 15:56:46.000000000 -0400
16823 @@ -91,7 +91,7 @@ startup_32:
16824 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
16825 movl %eax, %ds
16826
16827 - movl $X86_CR4_PAE, %eax
16828 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16829 movl %eax, %cr4 # Enable PAE mode
16830
16831 # Setup trampoline 4 level pagetables
16832 @@ -138,7 +138,7 @@ tidt:
16833 # so the kernel can live anywhere
16834 .balign 4
16835 tgdt:
16836 - .short tgdt_end - tgdt # gdt limit
16837 + .short tgdt_end - tgdt - 1 # gdt limit
16838 .long tgdt - r_base
16839 .short 0
16840 .quad 0x00cf9b000000ffff # __KERNEL32_CS
16841 diff -urNp linux-2.6.32.42/arch/x86/kernel/traps.c linux-2.6.32.42/arch/x86/kernel/traps.c
16842 --- linux-2.6.32.42/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
16843 +++ linux-2.6.32.42/arch/x86/kernel/traps.c 2011-04-17 15:56:46.000000000 -0400
16844 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
16845
16846 /* Do we ignore FPU interrupts ? */
16847 char ignore_fpu_irq;
16848 -
16849 -/*
16850 - * The IDT has to be page-aligned to simplify the Pentium
16851 - * F0 0F bug workaround.
16852 - */
16853 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
16854 #endif
16855
16856 DECLARE_BITMAP(used_vectors, NR_VECTORS);
16857 @@ -112,19 +106,19 @@ static inline void preempt_conditional_c
16858 static inline void
16859 die_if_kernel(const char *str, struct pt_regs *regs, long err)
16860 {
16861 - if (!user_mode_vm(regs))
16862 + if (!user_mode(regs))
16863 die(str, regs, err);
16864 }
16865 #endif
16866
16867 static void __kprobes
16868 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
16869 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
16870 long error_code, siginfo_t *info)
16871 {
16872 struct task_struct *tsk = current;
16873
16874 #ifdef CONFIG_X86_32
16875 - if (regs->flags & X86_VM_MASK) {
16876 + if (v8086_mode(regs)) {
16877 /*
16878 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
16879 * On nmi (interrupt 2), do_trap should not be called.
16880 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
16881 }
16882 #endif
16883
16884 - if (!user_mode(regs))
16885 + if (!user_mode_novm(regs))
16886 goto kernel_trap;
16887
16888 #ifdef CONFIG_X86_32
16889 @@ -158,7 +152,7 @@ trap_signal:
16890 printk_ratelimit()) {
16891 printk(KERN_INFO
16892 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
16893 - tsk->comm, tsk->pid, str,
16894 + tsk->comm, task_pid_nr(tsk), str,
16895 regs->ip, regs->sp, error_code);
16896 print_vma_addr(" in ", regs->ip);
16897 printk("\n");
16898 @@ -175,8 +169,20 @@ kernel_trap:
16899 if (!fixup_exception(regs)) {
16900 tsk->thread.error_code = error_code;
16901 tsk->thread.trap_no = trapnr;
16902 +
16903 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16904 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
16905 + str = "PAX: suspicious stack segment fault";
16906 +#endif
16907 +
16908 die(str, regs, error_code);
16909 }
16910 +
16911 +#ifdef CONFIG_PAX_REFCOUNT
16912 + if (trapnr == 4)
16913 + pax_report_refcount_overflow(regs);
16914 +#endif
16915 +
16916 return;
16917
16918 #ifdef CONFIG_X86_32
16919 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
16920 conditional_sti(regs);
16921
16922 #ifdef CONFIG_X86_32
16923 - if (regs->flags & X86_VM_MASK)
16924 + if (v8086_mode(regs))
16925 goto gp_in_vm86;
16926 #endif
16927
16928 tsk = current;
16929 - if (!user_mode(regs))
16930 + if (!user_mode_novm(regs))
16931 goto gp_in_kernel;
16932
16933 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
16934 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
16935 + struct mm_struct *mm = tsk->mm;
16936 + unsigned long limit;
16937 +
16938 + down_write(&mm->mmap_sem);
16939 + limit = mm->context.user_cs_limit;
16940 + if (limit < TASK_SIZE) {
16941 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
16942 + up_write(&mm->mmap_sem);
16943 + return;
16944 + }
16945 + up_write(&mm->mmap_sem);
16946 + }
16947 +#endif
16948 +
16949 tsk->thread.error_code = error_code;
16950 tsk->thread.trap_no = 13;
16951
16952 @@ -305,6 +327,13 @@ gp_in_kernel:
16953 if (notify_die(DIE_GPF, "general protection fault", regs,
16954 error_code, 13, SIGSEGV) == NOTIFY_STOP)
16955 return;
16956 +
16957 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16958 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
16959 + die("PAX: suspicious general protection fault", regs, error_code);
16960 + else
16961 +#endif
16962 +
16963 die("general protection fault", regs, error_code);
16964 }
16965
16966 @@ -558,7 +587,7 @@ dotraplinkage void __kprobes do_debug(st
16967 }
16968
16969 #ifdef CONFIG_X86_32
16970 - if (regs->flags & X86_VM_MASK)
16971 + if (v8086_mode(regs))
16972 goto debug_vm86;
16973 #endif
16974
16975 @@ -570,7 +599,7 @@ dotraplinkage void __kprobes do_debug(st
16976 * kernel space (but re-enable TF when returning to user mode).
16977 */
16978 if (condition & DR_STEP) {
16979 - if (!user_mode(regs))
16980 + if (!user_mode_novm(regs))
16981 goto clear_TF_reenable;
16982 }
16983
16984 @@ -757,7 +786,7 @@ do_simd_coprocessor_error(struct pt_regs
16985 * Handle strange cache flush from user space exception
16986 * in all other cases. This is undocumented behaviour.
16987 */
16988 - if (regs->flags & X86_VM_MASK) {
16989 + if (v8086_mode(regs)) {
16990 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
16991 return;
16992 }
16993 @@ -798,7 +827,7 @@ asmlinkage void __attribute__((weak)) sm
16994 void __math_state_restore(void)
16995 {
16996 struct thread_info *thread = current_thread_info();
16997 - struct task_struct *tsk = thread->task;
16998 + struct task_struct *tsk = current;
16999
17000 /*
17001 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17002 @@ -825,8 +854,7 @@ void __math_state_restore(void)
17003 */
17004 asmlinkage void math_state_restore(void)
17005 {
17006 - struct thread_info *thread = current_thread_info();
17007 - struct task_struct *tsk = thread->task;
17008 + struct task_struct *tsk = current;
17009
17010 if (!tsk_used_math(tsk)) {
17011 local_irq_enable();
17012 diff -urNp linux-2.6.32.42/arch/x86/kernel/vm86_32.c linux-2.6.32.42/arch/x86/kernel/vm86_32.c
17013 --- linux-2.6.32.42/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
17014 +++ linux-2.6.32.42/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
17015 @@ -41,6 +41,7 @@
17016 #include <linux/ptrace.h>
17017 #include <linux/audit.h>
17018 #include <linux/stddef.h>
17019 +#include <linux/grsecurity.h>
17020
17021 #include <asm/uaccess.h>
17022 #include <asm/io.h>
17023 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
17024 do_exit(SIGSEGV);
17025 }
17026
17027 - tss = &per_cpu(init_tss, get_cpu());
17028 + tss = init_tss + get_cpu();
17029 current->thread.sp0 = current->thread.saved_sp0;
17030 current->thread.sysenter_cs = __KERNEL_CS;
17031 load_sp0(tss, &current->thread);
17032 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
17033 struct task_struct *tsk;
17034 int tmp, ret = -EPERM;
17035
17036 +#ifdef CONFIG_GRKERNSEC_VM86
17037 + if (!capable(CAP_SYS_RAWIO)) {
17038 + gr_handle_vm86();
17039 + goto out;
17040 + }
17041 +#endif
17042 +
17043 tsk = current;
17044 if (tsk->thread.saved_sp0)
17045 goto out;
17046 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
17047 int tmp, ret;
17048 struct vm86plus_struct __user *v86;
17049
17050 +#ifdef CONFIG_GRKERNSEC_VM86
17051 + if (!capable(CAP_SYS_RAWIO)) {
17052 + gr_handle_vm86();
17053 + ret = -EPERM;
17054 + goto out;
17055 + }
17056 +#endif
17057 +
17058 tsk = current;
17059 switch (regs->bx) {
17060 case VM86_REQUEST_IRQ:
17061 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
17062 tsk->thread.saved_fs = info->regs32->fs;
17063 tsk->thread.saved_gs = get_user_gs(info->regs32);
17064
17065 - tss = &per_cpu(init_tss, get_cpu());
17066 + tss = init_tss + get_cpu();
17067 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17068 if (cpu_has_sep)
17069 tsk->thread.sysenter_cs = 0;
17070 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
17071 goto cannot_handle;
17072 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17073 goto cannot_handle;
17074 - intr_ptr = (unsigned long __user *) (i << 2);
17075 + intr_ptr = (__force unsigned long __user *) (i << 2);
17076 if (get_user(segoffs, intr_ptr))
17077 goto cannot_handle;
17078 if ((segoffs >> 16) == BIOSSEG)
17079 diff -urNp linux-2.6.32.42/arch/x86/kernel/vmi_32.c linux-2.6.32.42/arch/x86/kernel/vmi_32.c
17080 --- linux-2.6.32.42/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
17081 +++ linux-2.6.32.42/arch/x86/kernel/vmi_32.c 2011-04-17 15:56:46.000000000 -0400
17082 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
17083 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
17084
17085 #define call_vrom_func(rom,func) \
17086 - (((VROMFUNC *)(rom->func))())
17087 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
17088
17089 #define call_vrom_long_func(rom,func,arg) \
17090 - (((VROMLONGFUNC *)(rom->func)) (arg))
17091 +({\
17092 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
17093 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
17094 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
17095 + __reloc;\
17096 +})
17097
17098 -static struct vrom_header *vmi_rom;
17099 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
17100 static int disable_pge;
17101 static int disable_pse;
17102 static int disable_sep;
17103 @@ -76,10 +81,10 @@ static struct {
17104 void (*set_initial_ap_state)(int, int);
17105 void (*halt)(void);
17106 void (*set_lazy_mode)(int mode);
17107 -} vmi_ops;
17108 +} vmi_ops __read_only;
17109
17110 /* Cached VMI operations */
17111 -struct vmi_timer_ops vmi_timer_ops;
17112 +struct vmi_timer_ops vmi_timer_ops __read_only;
17113
17114 /*
17115 * VMI patching routines.
17116 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
17117 static inline void patch_offset(void *insnbuf,
17118 unsigned long ip, unsigned long dest)
17119 {
17120 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
17121 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
17122 }
17123
17124 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
17125 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
17126 {
17127 u64 reloc;
17128 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
17129 +
17130 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
17131 switch(rel->type) {
17132 case VMI_RELOCATION_CALL_REL:
17133 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
17134
17135 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
17136 {
17137 - const pte_t pte = { .pte = 0 };
17138 + const pte_t pte = __pte(0ULL);
17139 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
17140 }
17141
17142 static void vmi_pmd_clear(pmd_t *pmd)
17143 {
17144 - const pte_t pte = { .pte = 0 };
17145 + const pte_t pte = __pte(0ULL);
17146 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
17147 }
17148 #endif
17149 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
17150 ap.ss = __KERNEL_DS;
17151 ap.esp = (unsigned long) start_esp;
17152
17153 - ap.ds = __USER_DS;
17154 - ap.es = __USER_DS;
17155 + ap.ds = __KERNEL_DS;
17156 + ap.es = __KERNEL_DS;
17157 ap.fs = __KERNEL_PERCPU;
17158 - ap.gs = __KERNEL_STACK_CANARY;
17159 + savesegment(gs, ap.gs);
17160
17161 ap.eflags = 0;
17162
17163 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
17164 paravirt_leave_lazy_mmu();
17165 }
17166
17167 +#ifdef CONFIG_PAX_KERNEXEC
17168 +static unsigned long vmi_pax_open_kernel(void)
17169 +{
17170 + return 0;
17171 +}
17172 +
17173 +static unsigned long vmi_pax_close_kernel(void)
17174 +{
17175 + return 0;
17176 +}
17177 +#endif
17178 +
17179 static inline int __init check_vmi_rom(struct vrom_header *rom)
17180 {
17181 struct pci_header *pci;
17182 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
17183 return 0;
17184 if (rom->vrom_signature != VMI_SIGNATURE)
17185 return 0;
17186 + if (rom->rom_length * 512 > sizeof(*rom)) {
17187 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
17188 + return 0;
17189 + }
17190 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
17191 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
17192 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
17193 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
17194 struct vrom_header *romstart;
17195 romstart = (struct vrom_header *)isa_bus_to_virt(base);
17196 if (check_vmi_rom(romstart)) {
17197 - vmi_rom = romstart;
17198 + vmi_rom = *romstart;
17199 return 1;
17200 }
17201 }
17202 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
17203
17204 para_fill(pv_irq_ops.safe_halt, Halt);
17205
17206 +#ifdef CONFIG_PAX_KERNEXEC
17207 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
17208 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
17209 +#endif
17210 +
17211 /*
17212 * Alternative instruction rewriting doesn't happen soon enough
17213 * to convert VMI_IRET to a call instead of a jump; so we have
17214 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
17215
17216 void __init vmi_init(void)
17217 {
17218 - if (!vmi_rom)
17219 + if (!vmi_rom.rom_signature)
17220 probe_vmi_rom();
17221 else
17222 - check_vmi_rom(vmi_rom);
17223 + check_vmi_rom(&vmi_rom);
17224
17225 /* In case probing for or validating the ROM failed, basil */
17226 - if (!vmi_rom)
17227 + if (!vmi_rom.rom_signature)
17228 return;
17229
17230 - reserve_top_address(-vmi_rom->virtual_top);
17231 + reserve_top_address(-vmi_rom.virtual_top);
17232
17233 #ifdef CONFIG_X86_IO_APIC
17234 /* This is virtual hardware; timer routing is wired correctly */
17235 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
17236 {
17237 unsigned long flags;
17238
17239 - if (!vmi_rom)
17240 + if (!vmi_rom.rom_signature)
17241 return;
17242
17243 local_irq_save(flags);
17244 diff -urNp linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S
17245 --- linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
17246 +++ linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
17247 @@ -26,6 +26,13 @@
17248 #include <asm/page_types.h>
17249 #include <asm/cache.h>
17250 #include <asm/boot.h>
17251 +#include <asm/segment.h>
17252 +
17253 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17254 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17255 +#else
17256 +#define __KERNEL_TEXT_OFFSET 0
17257 +#endif
17258
17259 #undef i386 /* in case the preprocessor is a 32bit one */
17260
17261 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
17262 #ifdef CONFIG_X86_32
17263 OUTPUT_ARCH(i386)
17264 ENTRY(phys_startup_32)
17265 -jiffies = jiffies_64;
17266 #else
17267 OUTPUT_ARCH(i386:x86-64)
17268 ENTRY(phys_startup_64)
17269 -jiffies_64 = jiffies;
17270 #endif
17271
17272 PHDRS {
17273 text PT_LOAD FLAGS(5); /* R_E */
17274 - data PT_LOAD FLAGS(7); /* RWE */
17275 +#ifdef CONFIG_X86_32
17276 + module PT_LOAD FLAGS(5); /* R_E */
17277 +#endif
17278 +#ifdef CONFIG_XEN
17279 + rodata PT_LOAD FLAGS(5); /* R_E */
17280 +#else
17281 + rodata PT_LOAD FLAGS(4); /* R__ */
17282 +#endif
17283 + data PT_LOAD FLAGS(6); /* RW_ */
17284 #ifdef CONFIG_X86_64
17285 user PT_LOAD FLAGS(5); /* R_E */
17286 +#endif
17287 + init.begin PT_LOAD FLAGS(6); /* RW_ */
17288 #ifdef CONFIG_SMP
17289 percpu PT_LOAD FLAGS(6); /* RW_ */
17290 #endif
17291 + text.init PT_LOAD FLAGS(5); /* R_E */
17292 + text.exit PT_LOAD FLAGS(5); /* R_E */
17293 init PT_LOAD FLAGS(7); /* RWE */
17294 -#endif
17295 note PT_NOTE FLAGS(0); /* ___ */
17296 }
17297
17298 SECTIONS
17299 {
17300 #ifdef CONFIG_X86_32
17301 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17302 - phys_startup_32 = startup_32 - LOAD_OFFSET;
17303 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17304 #else
17305 - . = __START_KERNEL;
17306 - phys_startup_64 = startup_64 - LOAD_OFFSET;
17307 + . = __START_KERNEL;
17308 #endif
17309
17310 /* Text and read-only data */
17311 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
17312 - _text = .;
17313 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17314 /* bootstrapping code */
17315 +#ifdef CONFIG_X86_32
17316 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17317 +#else
17318 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17319 +#endif
17320 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17321 + _text = .;
17322 HEAD_TEXT
17323 #ifdef CONFIG_X86_32
17324 . = ALIGN(PAGE_SIZE);
17325 @@ -82,28 +102,71 @@ SECTIONS
17326 IRQENTRY_TEXT
17327 *(.fixup)
17328 *(.gnu.warning)
17329 - /* End of text section */
17330 - _etext = .;
17331 } :text = 0x9090
17332
17333 - NOTES :text :note
17334 + . += __KERNEL_TEXT_OFFSET;
17335 +
17336 +#ifdef CONFIG_X86_32
17337 + . = ALIGN(PAGE_SIZE);
17338 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
17339 + *(.vmi.rom)
17340 + } :module
17341 +
17342 + . = ALIGN(PAGE_SIZE);
17343 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17344 +
17345 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17346 + MODULES_EXEC_VADDR = .;
17347 + BYTE(0)
17348 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17349 + . = ALIGN(HPAGE_SIZE);
17350 + MODULES_EXEC_END = . - 1;
17351 +#endif
17352 +
17353 + } :module
17354 +#endif
17355
17356 - EXCEPTION_TABLE(16) :text = 0x9090
17357 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17358 + /* End of text section */
17359 + _etext = . - __KERNEL_TEXT_OFFSET;
17360 + }
17361 +
17362 +#ifdef CONFIG_X86_32
17363 + . = ALIGN(PAGE_SIZE);
17364 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17365 + *(.idt)
17366 + . = ALIGN(PAGE_SIZE);
17367 + *(.empty_zero_page)
17368 + *(.swapper_pg_fixmap)
17369 + *(.swapper_pg_pmd)
17370 + *(.swapper_pg_dir)
17371 + *(.trampoline_pg_dir)
17372 + } :rodata
17373 +#endif
17374 +
17375 + . = ALIGN(PAGE_SIZE);
17376 + NOTES :rodata :note
17377 +
17378 + EXCEPTION_TABLE(16) :rodata
17379
17380 RO_DATA(PAGE_SIZE)
17381
17382 /* Data */
17383 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17384 +
17385 +#ifdef CONFIG_PAX_KERNEXEC
17386 + . = ALIGN(HPAGE_SIZE);
17387 +#else
17388 + . = ALIGN(PAGE_SIZE);
17389 +#endif
17390 +
17391 /* Start of data section */
17392 _sdata = .;
17393
17394 /* init_task */
17395 INIT_TASK_DATA(THREAD_SIZE)
17396
17397 -#ifdef CONFIG_X86_32
17398 - /* 32 bit has nosave before _edata */
17399 NOSAVE_DATA
17400 -#endif
17401
17402 PAGE_ALIGNED_DATA(PAGE_SIZE)
17403
17404 @@ -112,6 +175,8 @@ SECTIONS
17405 DATA_DATA
17406 CONSTRUCTORS
17407
17408 + jiffies = jiffies_64;
17409 +
17410 /* rarely changed data like cpu maps */
17411 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
17412
17413 @@ -166,12 +231,6 @@ SECTIONS
17414 }
17415 vgetcpu_mode = VVIRT(.vgetcpu_mode);
17416
17417 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
17418 - .jiffies : AT(VLOAD(.jiffies)) {
17419 - *(.jiffies)
17420 - }
17421 - jiffies = VVIRT(.jiffies);
17422 -
17423 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
17424 *(.vsyscall_3)
17425 }
17426 @@ -187,12 +246,19 @@ SECTIONS
17427 #endif /* CONFIG_X86_64 */
17428
17429 /* Init code and data - will be freed after init */
17430 - . = ALIGN(PAGE_SIZE);
17431 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17432 + BYTE(0)
17433 +
17434 +#ifdef CONFIG_PAX_KERNEXEC
17435 + . = ALIGN(HPAGE_SIZE);
17436 +#else
17437 + . = ALIGN(PAGE_SIZE);
17438 +#endif
17439 +
17440 __init_begin = .; /* paired with __init_end */
17441 - }
17442 + } :init.begin
17443
17444 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17445 +#ifdef CONFIG_SMP
17446 /*
17447 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17448 * output PHDR, so the next output section - .init.text - should
17449 @@ -201,12 +267,27 @@ SECTIONS
17450 PERCPU_VADDR(0, :percpu)
17451 #endif
17452
17453 - INIT_TEXT_SECTION(PAGE_SIZE)
17454 -#ifdef CONFIG_X86_64
17455 - :init
17456 -#endif
17457 + . = ALIGN(PAGE_SIZE);
17458 + init_begin = .;
17459 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17460 + VMLINUX_SYMBOL(_sinittext) = .;
17461 + INIT_TEXT
17462 + VMLINUX_SYMBOL(_einittext) = .;
17463 + . = ALIGN(PAGE_SIZE);
17464 + } :text.init
17465
17466 - INIT_DATA_SECTION(16)
17467 + /*
17468 + * .exit.text is discard at runtime, not link time, to deal with
17469 + * references from .altinstructions and .eh_frame
17470 + */
17471 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17472 + EXIT_TEXT
17473 + . = ALIGN(16);
17474 + } :text.exit
17475 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
17476 +
17477 + . = ALIGN(PAGE_SIZE);
17478 + INIT_DATA_SECTION(16) :init
17479
17480 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
17481 __x86_cpu_dev_start = .;
17482 @@ -232,19 +313,11 @@ SECTIONS
17483 *(.altinstr_replacement)
17484 }
17485
17486 - /*
17487 - * .exit.text is discard at runtime, not link time, to deal with
17488 - * references from .altinstructions and .eh_frame
17489 - */
17490 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
17491 - EXIT_TEXT
17492 - }
17493 -
17494 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
17495 EXIT_DATA
17496 }
17497
17498 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
17499 +#ifndef CONFIG_SMP
17500 PERCPU(PAGE_SIZE)
17501 #endif
17502
17503 @@ -267,12 +340,6 @@ SECTIONS
17504 . = ALIGN(PAGE_SIZE);
17505 }
17506
17507 -#ifdef CONFIG_X86_64
17508 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
17509 - NOSAVE_DATA
17510 - }
17511 -#endif
17512 -
17513 /* BSS */
17514 . = ALIGN(PAGE_SIZE);
17515 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
17516 @@ -288,6 +355,7 @@ SECTIONS
17517 __brk_base = .;
17518 . += 64 * 1024; /* 64k alignment slop space */
17519 *(.brk_reservation) /* areas brk users have reserved */
17520 + . = ALIGN(HPAGE_SIZE);
17521 __brk_limit = .;
17522 }
17523
17524 @@ -316,13 +384,12 @@ SECTIONS
17525 * for the boot processor.
17526 */
17527 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
17528 -INIT_PER_CPU(gdt_page);
17529 INIT_PER_CPU(irq_stack_union);
17530
17531 /*
17532 * Build-time check on the image size:
17533 */
17534 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
17535 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
17536 "kernel image bigger than KERNEL_IMAGE_SIZE");
17537
17538 #ifdef CONFIG_SMP
17539 diff -urNp linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c
17540 --- linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
17541 +++ linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
17542 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
17543
17544 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
17545 /* copy vsyscall data */
17546 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
17547 vsyscall_gtod_data.clock.vread = clock->vread;
17548 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
17549 vsyscall_gtod_data.clock.mask = clock->mask;
17550 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
17551 We do this here because otherwise user space would do it on
17552 its own in a likely inferior way (no access to jiffies).
17553 If you don't like it pass NULL. */
17554 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
17555 + if (tcache && tcache->blob[0] == (j = jiffies)) {
17556 p = tcache->blob[1];
17557 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
17558 /* Load per CPU data from RDTSCP */
17559 diff -urNp linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c
17560 --- linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
17561 +++ linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
17562 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
17563
17564 EXPORT_SYMBOL(copy_user_generic);
17565 EXPORT_SYMBOL(__copy_user_nocache);
17566 -EXPORT_SYMBOL(copy_from_user);
17567 -EXPORT_SYMBOL(copy_to_user);
17568 EXPORT_SYMBOL(__copy_from_user_inatomic);
17569
17570 EXPORT_SYMBOL(copy_page);
17571 diff -urNp linux-2.6.32.42/arch/x86/kernel/xsave.c linux-2.6.32.42/arch/x86/kernel/xsave.c
17572 --- linux-2.6.32.42/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
17573 +++ linux-2.6.32.42/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
17574 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
17575 fx_sw_user->xstate_size > fx_sw_user->extended_size)
17576 return -1;
17577
17578 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
17579 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
17580 fx_sw_user->extended_size -
17581 FP_XSTATE_MAGIC2_SIZE));
17582 /*
17583 @@ -196,7 +196,7 @@ fx_only:
17584 * the other extended state.
17585 */
17586 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
17587 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
17588 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
17589 }
17590
17591 /*
17592 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
17593 if (task_thread_info(tsk)->status & TS_XSAVE)
17594 err = restore_user_xstate(buf);
17595 else
17596 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
17597 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
17598 buf);
17599 if (unlikely(err)) {
17600 /*
17601 diff -urNp linux-2.6.32.42/arch/x86/kvm/emulate.c linux-2.6.32.42/arch/x86/kvm/emulate.c
17602 --- linux-2.6.32.42/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
17603 +++ linux-2.6.32.42/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
17604 @@ -81,8 +81,8 @@
17605 #define Src2CL (1<<29)
17606 #define Src2ImmByte (2<<29)
17607 #define Src2One (3<<29)
17608 -#define Src2Imm16 (4<<29)
17609 -#define Src2Mask (7<<29)
17610 +#define Src2Imm16 (4U<<29)
17611 +#define Src2Mask (7U<<29)
17612
17613 enum {
17614 Group1_80, Group1_81, Group1_82, Group1_83,
17615 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
17616
17617 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
17618 do { \
17619 + unsigned long _tmp; \
17620 __asm__ __volatile__ ( \
17621 _PRE_EFLAGS("0", "4", "2") \
17622 _op _suffix " %"_x"3,%1; " \
17623 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
17624 /* Raw emulation: instruction has two explicit operands. */
17625 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
17626 do { \
17627 - unsigned long _tmp; \
17628 - \
17629 switch ((_dst).bytes) { \
17630 case 2: \
17631 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
17632 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
17633
17634 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
17635 do { \
17636 - unsigned long _tmp; \
17637 switch ((_dst).bytes) { \
17638 case 1: \
17639 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
17640 diff -urNp linux-2.6.32.42/arch/x86/kvm/lapic.c linux-2.6.32.42/arch/x86/kvm/lapic.c
17641 --- linux-2.6.32.42/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
17642 +++ linux-2.6.32.42/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
17643 @@ -52,7 +52,7 @@
17644 #define APIC_BUS_CYCLE_NS 1
17645
17646 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
17647 -#define apic_debug(fmt, arg...)
17648 +#define apic_debug(fmt, arg...) do {} while (0)
17649
17650 #define APIC_LVT_NUM 6
17651 /* 14 is the version for Xeon and Pentium 8.4.8*/
17652 diff -urNp linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h
17653 --- linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
17654 +++ linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
17655 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
17656 int level = PT_PAGE_TABLE_LEVEL;
17657 unsigned long mmu_seq;
17658
17659 + pax_track_stack();
17660 +
17661 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
17662 kvm_mmu_audit(vcpu, "pre page fault");
17663
17664 diff -urNp linux-2.6.32.42/arch/x86/kvm/svm.c linux-2.6.32.42/arch/x86/kvm/svm.c
17665 --- linux-2.6.32.42/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
17666 +++ linux-2.6.32.42/arch/x86/kvm/svm.c 2011-04-17 15:56:46.000000000 -0400
17667 @@ -2483,9 +2483,12 @@ static int handle_exit(struct kvm_run *k
17668 static void reload_tss(struct kvm_vcpu *vcpu)
17669 {
17670 int cpu = raw_smp_processor_id();
17671 -
17672 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
17673 +
17674 + pax_open_kernel();
17675 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
17676 + pax_close_kernel();
17677 +
17678 load_TR_desc();
17679 }
17680
17681 @@ -2946,7 +2949,7 @@ static bool svm_gb_page_enable(void)
17682 return true;
17683 }
17684
17685 -static struct kvm_x86_ops svm_x86_ops = {
17686 +static const struct kvm_x86_ops svm_x86_ops = {
17687 .cpu_has_kvm_support = has_svm,
17688 .disabled_by_bios = is_disabled,
17689 .hardware_setup = svm_hardware_setup,
17690 diff -urNp linux-2.6.32.42/arch/x86/kvm/vmx.c linux-2.6.32.42/arch/x86/kvm/vmx.c
17691 --- linux-2.6.32.42/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
17692 +++ linux-2.6.32.42/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
17693 @@ -570,7 +570,11 @@ static void reload_tss(void)
17694
17695 kvm_get_gdt(&gdt);
17696 descs = (void *)gdt.base;
17697 +
17698 + pax_open_kernel();
17699 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
17700 + pax_close_kernel();
17701 +
17702 load_TR_desc();
17703 }
17704
17705 @@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
17706 if (!cpu_has_vmx_flexpriority())
17707 flexpriority_enabled = 0;
17708
17709 - if (!cpu_has_vmx_tpr_shadow())
17710 - kvm_x86_ops->update_cr8_intercept = NULL;
17711 + if (!cpu_has_vmx_tpr_shadow()) {
17712 + pax_open_kernel();
17713 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
17714 + pax_close_kernel();
17715 + }
17716
17717 if (enable_ept && !cpu_has_vmx_ept_2m_page())
17718 kvm_disable_largepages();
17719 @@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
17720 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
17721
17722 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
17723 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
17724 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
17725 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
17726 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
17727 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
17728 @@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
17729 "jmp .Lkvm_vmx_return \n\t"
17730 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
17731 ".Lkvm_vmx_return: "
17732 +
17733 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17734 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
17735 + ".Lkvm_vmx_return2: "
17736 +#endif
17737 +
17738 /* Save guest registers, load host registers, keep flags */
17739 "xchg %0, (%%"R"sp) \n\t"
17740 "mov %%"R"ax, %c[rax](%0) \n\t"
17741 @@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
17742 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
17743 #endif
17744 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
17745 +
17746 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17747 + ,[cs]"i"(__KERNEL_CS)
17748 +#endif
17749 +
17750 : "cc", "memory"
17751 - , R"bx", R"di", R"si"
17752 + , R"ax", R"bx", R"di", R"si"
17753 #ifdef CONFIG_X86_64
17754 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
17755 #endif
17756 @@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
17757 if (vmx->rmode.irq.pending)
17758 fixup_rmode_irq(vmx);
17759
17760 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
17761 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
17762 +
17763 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17764 + loadsegment(fs, __KERNEL_PERCPU);
17765 +#endif
17766 +
17767 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17768 + __set_fs(current_thread_info()->addr_limit);
17769 +#endif
17770 +
17771 vmx->launched = 1;
17772
17773 vmx_complete_interrupts(vmx);
17774 @@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
17775 return false;
17776 }
17777
17778 -static struct kvm_x86_ops vmx_x86_ops = {
17779 +static const struct kvm_x86_ops vmx_x86_ops = {
17780 .cpu_has_kvm_support = cpu_has_kvm_support,
17781 .disabled_by_bios = vmx_disabled_by_bios,
17782 .hardware_setup = hardware_setup,
17783 diff -urNp linux-2.6.32.42/arch/x86/kvm/x86.c linux-2.6.32.42/arch/x86/kvm/x86.c
17784 --- linux-2.6.32.42/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
17785 +++ linux-2.6.32.42/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
17786 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
17787 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
17788 struct kvm_cpuid_entry2 __user *entries);
17789
17790 -struct kvm_x86_ops *kvm_x86_ops;
17791 +const struct kvm_x86_ops *kvm_x86_ops;
17792 EXPORT_SYMBOL_GPL(kvm_x86_ops);
17793
17794 int ignore_msrs = 0;
17795 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
17796 struct kvm_cpuid2 *cpuid,
17797 struct kvm_cpuid_entry2 __user *entries)
17798 {
17799 - int r;
17800 + int r, i;
17801
17802 r = -E2BIG;
17803 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
17804 goto out;
17805 r = -EFAULT;
17806 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
17807 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17808 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17809 goto out;
17810 + for (i = 0; i < cpuid->nent; ++i) {
17811 + struct kvm_cpuid_entry2 cpuid_entry;
17812 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
17813 + goto out;
17814 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
17815 + }
17816 vcpu->arch.cpuid_nent = cpuid->nent;
17817 kvm_apic_set_version(vcpu);
17818 return 0;
17819 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
17820 struct kvm_cpuid2 *cpuid,
17821 struct kvm_cpuid_entry2 __user *entries)
17822 {
17823 - int r;
17824 + int r, i;
17825
17826 vcpu_load(vcpu);
17827 r = -E2BIG;
17828 if (cpuid->nent < vcpu->arch.cpuid_nent)
17829 goto out;
17830 r = -EFAULT;
17831 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
17832 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17833 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17834 goto out;
17835 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
17836 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
17837 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
17838 + goto out;
17839 + }
17840 return 0;
17841
17842 out:
17843 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
17844 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
17845 struct kvm_interrupt *irq)
17846 {
17847 - if (irq->irq < 0 || irq->irq >= 256)
17848 + if (irq->irq >= 256)
17849 return -EINVAL;
17850 if (irqchip_in_kernel(vcpu->kvm))
17851 return -ENXIO;
17852 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
17853 .notifier_call = kvmclock_cpufreq_notifier
17854 };
17855
17856 -int kvm_arch_init(void *opaque)
17857 +int kvm_arch_init(const void *opaque)
17858 {
17859 int r, cpu;
17860 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
17861 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
17862
17863 if (kvm_x86_ops) {
17864 printk(KERN_ERR "kvm: already loaded the other module\n");
17865 diff -urNp linux-2.6.32.42/arch/x86/lib/atomic64_32.c linux-2.6.32.42/arch/x86/lib/atomic64_32.c
17866 --- linux-2.6.32.42/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
17867 +++ linux-2.6.32.42/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
17868 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
17869 }
17870 EXPORT_SYMBOL(atomic64_cmpxchg);
17871
17872 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
17873 +{
17874 + return cmpxchg8b(&ptr->counter, old_val, new_val);
17875 +}
17876 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
17877 +
17878 /**
17879 * atomic64_xchg - xchg atomic64 variable
17880 * @ptr: pointer to type atomic64_t
17881 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
17882 EXPORT_SYMBOL(atomic64_xchg);
17883
17884 /**
17885 + * atomic64_xchg_unchecked - xchg atomic64 variable
17886 + * @ptr: pointer to type atomic64_unchecked_t
17887 + * @new_val: value to assign
17888 + *
17889 + * Atomically xchgs the value of @ptr to @new_val and returns
17890 + * the old value.
17891 + */
17892 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
17893 +{
17894 + /*
17895 + * Try first with a (possibly incorrect) assumption about
17896 + * what we have there. We'll do two loops most likely,
17897 + * but we'll get an ownership MESI transaction straight away
17898 + * instead of a read transaction followed by a
17899 + * flush-for-ownership transaction:
17900 + */
17901 + u64 old_val, real_val = 0;
17902 +
17903 + do {
17904 + old_val = real_val;
17905 +
17906 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
17907 +
17908 + } while (real_val != old_val);
17909 +
17910 + return old_val;
17911 +}
17912 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
17913 +
17914 +/**
17915 * atomic64_set - set atomic64 variable
17916 * @ptr: pointer to type atomic64_t
17917 * @new_val: value to assign
17918 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
17919 EXPORT_SYMBOL(atomic64_set);
17920
17921 /**
17922 -EXPORT_SYMBOL(atomic64_read);
17923 + * atomic64_unchecked_set - set atomic64 variable
17924 + * @ptr: pointer to type atomic64_unchecked_t
17925 + * @new_val: value to assign
17926 + *
17927 + * Atomically sets the value of @ptr to @new_val.
17928 + */
17929 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
17930 +{
17931 + atomic64_xchg_unchecked(ptr, new_val);
17932 +}
17933 +EXPORT_SYMBOL(atomic64_set_unchecked);
17934 +
17935 +/**
17936 * atomic64_add_return - add and return
17937 * @delta: integer value to add
17938 * @ptr: pointer to type atomic64_t
17939 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
17940 }
17941 EXPORT_SYMBOL(atomic64_add_return);
17942
17943 +/**
17944 + * atomic64_add_return_unchecked - add and return
17945 + * @delta: integer value to add
17946 + * @ptr: pointer to type atomic64_unchecked_t
17947 + *
17948 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
17949 + */
17950 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17951 +{
17952 + /*
17953 + * Try first with a (possibly incorrect) assumption about
17954 + * what we have there. We'll do two loops most likely,
17955 + * but we'll get an ownership MESI transaction straight away
17956 + * instead of a read transaction followed by a
17957 + * flush-for-ownership transaction:
17958 + */
17959 + u64 old_val, new_val, real_val = 0;
17960 +
17961 + do {
17962 + old_val = real_val;
17963 + new_val = old_val + delta;
17964 +
17965 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
17966 +
17967 + } while (real_val != old_val);
17968 +
17969 + return new_val;
17970 +}
17971 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
17972 +
17973 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
17974 {
17975 return atomic64_add_return(-delta, ptr);
17976 }
17977 EXPORT_SYMBOL(atomic64_sub_return);
17978
17979 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17980 +{
17981 + return atomic64_add_return_unchecked(-delta, ptr);
17982 +}
17983 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
17984 +
17985 u64 atomic64_inc_return(atomic64_t *ptr)
17986 {
17987 return atomic64_add_return(1, ptr);
17988 }
17989 EXPORT_SYMBOL(atomic64_inc_return);
17990
17991 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
17992 +{
17993 + return atomic64_add_return_unchecked(1, ptr);
17994 +}
17995 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
17996 +
17997 u64 atomic64_dec_return(atomic64_t *ptr)
17998 {
17999 return atomic64_sub_return(1, ptr);
18000 }
18001 EXPORT_SYMBOL(atomic64_dec_return);
18002
18003 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
18004 +{
18005 + return atomic64_sub_return_unchecked(1, ptr);
18006 +}
18007 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
18008 +
18009 /**
18010 * atomic64_add - add integer to atomic64 variable
18011 * @delta: integer value to add
18012 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
18013 EXPORT_SYMBOL(atomic64_add);
18014
18015 /**
18016 + * atomic64_add_unchecked - add integer to atomic64 variable
18017 + * @delta: integer value to add
18018 + * @ptr: pointer to type atomic64_unchecked_t
18019 + *
18020 + * Atomically adds @delta to @ptr.
18021 + */
18022 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18023 +{
18024 + atomic64_add_return_unchecked(delta, ptr);
18025 +}
18026 +EXPORT_SYMBOL(atomic64_add_unchecked);
18027 +
18028 +/**
18029 * atomic64_sub - subtract the atomic64 variable
18030 * @delta: integer value to subtract
18031 * @ptr: pointer to type atomic64_t
18032 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
18033 EXPORT_SYMBOL(atomic64_sub);
18034
18035 /**
18036 + * atomic64_sub_unchecked - subtract the atomic64 variable
18037 + * @delta: integer value to subtract
18038 + * @ptr: pointer to type atomic64_unchecked_t
18039 + *
18040 + * Atomically subtracts @delta from @ptr.
18041 + */
18042 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18043 +{
18044 + atomic64_add_unchecked(-delta, ptr);
18045 +}
18046 +EXPORT_SYMBOL(atomic64_sub_unchecked);
18047 +
18048 +/**
18049 * atomic64_sub_and_test - subtract value from variable and test result
18050 * @delta: integer value to subtract
18051 * @ptr: pointer to type atomic64_t
18052 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
18053 EXPORT_SYMBOL(atomic64_inc);
18054
18055 /**
18056 + * atomic64_inc_unchecked - increment atomic64 variable
18057 + * @ptr: pointer to type atomic64_unchecked_t
18058 + *
18059 + * Atomically increments @ptr by 1.
18060 + */
18061 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
18062 +{
18063 + atomic64_add_unchecked(1, ptr);
18064 +}
18065 +EXPORT_SYMBOL(atomic64_inc_unchecked);
18066 +
18067 +/**
18068 * atomic64_dec - decrement atomic64 variable
18069 * @ptr: pointer to type atomic64_t
18070 *
18071 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
18072 EXPORT_SYMBOL(atomic64_dec);
18073
18074 /**
18075 + * atomic64_dec_unchecked - decrement atomic64 variable
18076 + * @ptr: pointer to type atomic64_unchecked_t
18077 + *
18078 + * Atomically decrements @ptr by 1.
18079 + */
18080 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
18081 +{
18082 + atomic64_sub_unchecked(1, ptr);
18083 +}
18084 +EXPORT_SYMBOL(atomic64_dec_unchecked);
18085 +
18086 +/**
18087 * atomic64_dec_and_test - decrement and test
18088 * @ptr: pointer to type atomic64_t
18089 *
18090 diff -urNp linux-2.6.32.42/arch/x86/lib/checksum_32.S linux-2.6.32.42/arch/x86/lib/checksum_32.S
18091 --- linux-2.6.32.42/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
18092 +++ linux-2.6.32.42/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
18093 @@ -28,7 +28,8 @@
18094 #include <linux/linkage.h>
18095 #include <asm/dwarf2.h>
18096 #include <asm/errno.h>
18097 -
18098 +#include <asm/segment.h>
18099 +
18100 /*
18101 * computes a partial checksum, e.g. for TCP/UDP fragments
18102 */
18103 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
18104
18105 #define ARGBASE 16
18106 #define FP 12
18107 -
18108 -ENTRY(csum_partial_copy_generic)
18109 +
18110 +ENTRY(csum_partial_copy_generic_to_user)
18111 CFI_STARTPROC
18112 +
18113 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18114 + pushl %gs
18115 + CFI_ADJUST_CFA_OFFSET 4
18116 + popl %es
18117 + CFI_ADJUST_CFA_OFFSET -4
18118 + jmp csum_partial_copy_generic
18119 +#endif
18120 +
18121 +ENTRY(csum_partial_copy_generic_from_user)
18122 +
18123 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18124 + pushl %gs
18125 + CFI_ADJUST_CFA_OFFSET 4
18126 + popl %ds
18127 + CFI_ADJUST_CFA_OFFSET -4
18128 +#endif
18129 +
18130 +ENTRY(csum_partial_copy_generic)
18131 subl $4,%esp
18132 CFI_ADJUST_CFA_OFFSET 4
18133 pushl %edi
18134 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
18135 jmp 4f
18136 SRC(1: movw (%esi), %bx )
18137 addl $2, %esi
18138 -DST( movw %bx, (%edi) )
18139 +DST( movw %bx, %es:(%edi) )
18140 addl $2, %edi
18141 addw %bx, %ax
18142 adcl $0, %eax
18143 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
18144 SRC(1: movl (%esi), %ebx )
18145 SRC( movl 4(%esi), %edx )
18146 adcl %ebx, %eax
18147 -DST( movl %ebx, (%edi) )
18148 +DST( movl %ebx, %es:(%edi) )
18149 adcl %edx, %eax
18150 -DST( movl %edx, 4(%edi) )
18151 +DST( movl %edx, %es:4(%edi) )
18152
18153 SRC( movl 8(%esi), %ebx )
18154 SRC( movl 12(%esi), %edx )
18155 adcl %ebx, %eax
18156 -DST( movl %ebx, 8(%edi) )
18157 +DST( movl %ebx, %es:8(%edi) )
18158 adcl %edx, %eax
18159 -DST( movl %edx, 12(%edi) )
18160 +DST( movl %edx, %es:12(%edi) )
18161
18162 SRC( movl 16(%esi), %ebx )
18163 SRC( movl 20(%esi), %edx )
18164 adcl %ebx, %eax
18165 -DST( movl %ebx, 16(%edi) )
18166 +DST( movl %ebx, %es:16(%edi) )
18167 adcl %edx, %eax
18168 -DST( movl %edx, 20(%edi) )
18169 +DST( movl %edx, %es:20(%edi) )
18170
18171 SRC( movl 24(%esi), %ebx )
18172 SRC( movl 28(%esi), %edx )
18173 adcl %ebx, %eax
18174 -DST( movl %ebx, 24(%edi) )
18175 +DST( movl %ebx, %es:24(%edi) )
18176 adcl %edx, %eax
18177 -DST( movl %edx, 28(%edi) )
18178 +DST( movl %edx, %es:28(%edi) )
18179
18180 lea 32(%esi), %esi
18181 lea 32(%edi), %edi
18182 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
18183 shrl $2, %edx # This clears CF
18184 SRC(3: movl (%esi), %ebx )
18185 adcl %ebx, %eax
18186 -DST( movl %ebx, (%edi) )
18187 +DST( movl %ebx, %es:(%edi) )
18188 lea 4(%esi), %esi
18189 lea 4(%edi), %edi
18190 dec %edx
18191 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
18192 jb 5f
18193 SRC( movw (%esi), %cx )
18194 leal 2(%esi), %esi
18195 -DST( movw %cx, (%edi) )
18196 +DST( movw %cx, %es:(%edi) )
18197 leal 2(%edi), %edi
18198 je 6f
18199 shll $16,%ecx
18200 SRC(5: movb (%esi), %cl )
18201 -DST( movb %cl, (%edi) )
18202 +DST( movb %cl, %es:(%edi) )
18203 6: addl %ecx, %eax
18204 adcl $0, %eax
18205 7:
18206 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
18207
18208 6001:
18209 movl ARGBASE+20(%esp), %ebx # src_err_ptr
18210 - movl $-EFAULT, (%ebx)
18211 + movl $-EFAULT, %ss:(%ebx)
18212
18213 # zero the complete destination - computing the rest
18214 # is too much work
18215 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
18216
18217 6002:
18218 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18219 - movl $-EFAULT,(%ebx)
18220 + movl $-EFAULT,%ss:(%ebx)
18221 jmp 5000b
18222
18223 .previous
18224
18225 + pushl %ss
18226 + CFI_ADJUST_CFA_OFFSET 4
18227 + popl %ds
18228 + CFI_ADJUST_CFA_OFFSET -4
18229 + pushl %ss
18230 + CFI_ADJUST_CFA_OFFSET 4
18231 + popl %es
18232 + CFI_ADJUST_CFA_OFFSET -4
18233 popl %ebx
18234 CFI_ADJUST_CFA_OFFSET -4
18235 CFI_RESTORE ebx
18236 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
18237 CFI_ADJUST_CFA_OFFSET -4
18238 ret
18239 CFI_ENDPROC
18240 -ENDPROC(csum_partial_copy_generic)
18241 +ENDPROC(csum_partial_copy_generic_to_user)
18242
18243 #else
18244
18245 /* Version for PentiumII/PPro */
18246
18247 #define ROUND1(x) \
18248 + nop; nop; nop; \
18249 SRC(movl x(%esi), %ebx ) ; \
18250 addl %ebx, %eax ; \
18251 - DST(movl %ebx, x(%edi) ) ;
18252 + DST(movl %ebx, %es:x(%edi)) ;
18253
18254 #define ROUND(x) \
18255 + nop; nop; nop; \
18256 SRC(movl x(%esi), %ebx ) ; \
18257 adcl %ebx, %eax ; \
18258 - DST(movl %ebx, x(%edi) ) ;
18259 + DST(movl %ebx, %es:x(%edi)) ;
18260
18261 #define ARGBASE 12
18262 -
18263 -ENTRY(csum_partial_copy_generic)
18264 +
18265 +ENTRY(csum_partial_copy_generic_to_user)
18266 CFI_STARTPROC
18267 +
18268 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18269 + pushl %gs
18270 + CFI_ADJUST_CFA_OFFSET 4
18271 + popl %es
18272 + CFI_ADJUST_CFA_OFFSET -4
18273 + jmp csum_partial_copy_generic
18274 +#endif
18275 +
18276 +ENTRY(csum_partial_copy_generic_from_user)
18277 +
18278 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18279 + pushl %gs
18280 + CFI_ADJUST_CFA_OFFSET 4
18281 + popl %ds
18282 + CFI_ADJUST_CFA_OFFSET -4
18283 +#endif
18284 +
18285 +ENTRY(csum_partial_copy_generic)
18286 pushl %ebx
18287 CFI_ADJUST_CFA_OFFSET 4
18288 CFI_REL_OFFSET ebx, 0
18289 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
18290 subl %ebx, %edi
18291 lea -1(%esi),%edx
18292 andl $-32,%edx
18293 - lea 3f(%ebx,%ebx), %ebx
18294 + lea 3f(%ebx,%ebx,2), %ebx
18295 testl %esi, %esi
18296 jmp *%ebx
18297 1: addl $64,%esi
18298 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
18299 jb 5f
18300 SRC( movw (%esi), %dx )
18301 leal 2(%esi), %esi
18302 -DST( movw %dx, (%edi) )
18303 +DST( movw %dx, %es:(%edi) )
18304 leal 2(%edi), %edi
18305 je 6f
18306 shll $16,%edx
18307 5:
18308 SRC( movb (%esi), %dl )
18309 -DST( movb %dl, (%edi) )
18310 +DST( movb %dl, %es:(%edi) )
18311 6: addl %edx, %eax
18312 adcl $0, %eax
18313 7:
18314 .section .fixup, "ax"
18315 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
18316 - movl $-EFAULT, (%ebx)
18317 + movl $-EFAULT, %ss:(%ebx)
18318 # zero the complete destination (computing the rest is too much work)
18319 movl ARGBASE+8(%esp),%edi # dst
18320 movl ARGBASE+12(%esp),%ecx # len
18321 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
18322 rep; stosb
18323 jmp 7b
18324 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18325 - movl $-EFAULT, (%ebx)
18326 + movl $-EFAULT, %ss:(%ebx)
18327 jmp 7b
18328 .previous
18329
18330 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18331 + pushl %ss
18332 + CFI_ADJUST_CFA_OFFSET 4
18333 + popl %ds
18334 + CFI_ADJUST_CFA_OFFSET -4
18335 + pushl %ss
18336 + CFI_ADJUST_CFA_OFFSET 4
18337 + popl %es
18338 + CFI_ADJUST_CFA_OFFSET -4
18339 +#endif
18340 +
18341 popl %esi
18342 CFI_ADJUST_CFA_OFFSET -4
18343 CFI_RESTORE esi
18344 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
18345 CFI_RESTORE ebx
18346 ret
18347 CFI_ENDPROC
18348 -ENDPROC(csum_partial_copy_generic)
18349 +ENDPROC(csum_partial_copy_generic_to_user)
18350
18351 #undef ROUND
18352 #undef ROUND1
18353 diff -urNp linux-2.6.32.42/arch/x86/lib/clear_page_64.S linux-2.6.32.42/arch/x86/lib/clear_page_64.S
18354 --- linux-2.6.32.42/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
18355 +++ linux-2.6.32.42/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
18356 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
18357
18358 #include <asm/cpufeature.h>
18359
18360 - .section .altinstr_replacement,"ax"
18361 + .section .altinstr_replacement,"a"
18362 1: .byte 0xeb /* jmp <disp8> */
18363 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
18364 2:
18365 diff -urNp linux-2.6.32.42/arch/x86/lib/copy_page_64.S linux-2.6.32.42/arch/x86/lib/copy_page_64.S
18366 --- linux-2.6.32.42/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
18367 +++ linux-2.6.32.42/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
18368 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
18369
18370 #include <asm/cpufeature.h>
18371
18372 - .section .altinstr_replacement,"ax"
18373 + .section .altinstr_replacement,"a"
18374 1: .byte 0xeb /* jmp <disp8> */
18375 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
18376 2:
18377 diff -urNp linux-2.6.32.42/arch/x86/lib/copy_user_64.S linux-2.6.32.42/arch/x86/lib/copy_user_64.S
18378 --- linux-2.6.32.42/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
18379 +++ linux-2.6.32.42/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
18380 @@ -15,13 +15,14 @@
18381 #include <asm/asm-offsets.h>
18382 #include <asm/thread_info.h>
18383 #include <asm/cpufeature.h>
18384 +#include <asm/pgtable.h>
18385
18386 .macro ALTERNATIVE_JUMP feature,orig,alt
18387 0:
18388 .byte 0xe9 /* 32bit jump */
18389 .long \orig-1f /* by default jump to orig */
18390 1:
18391 - .section .altinstr_replacement,"ax"
18392 + .section .altinstr_replacement,"a"
18393 2: .byte 0xe9 /* near jump with 32bit immediate */
18394 .long \alt-1b /* offset */ /* or alternatively to alt */
18395 .previous
18396 @@ -64,49 +65,19 @@
18397 #endif
18398 .endm
18399
18400 -/* Standard copy_to_user with segment limit checking */
18401 -ENTRY(copy_to_user)
18402 - CFI_STARTPROC
18403 - GET_THREAD_INFO(%rax)
18404 - movq %rdi,%rcx
18405 - addq %rdx,%rcx
18406 - jc bad_to_user
18407 - cmpq TI_addr_limit(%rax),%rcx
18408 - ja bad_to_user
18409 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18410 - CFI_ENDPROC
18411 -ENDPROC(copy_to_user)
18412 -
18413 -/* Standard copy_from_user with segment limit checking */
18414 -ENTRY(copy_from_user)
18415 - CFI_STARTPROC
18416 - GET_THREAD_INFO(%rax)
18417 - movq %rsi,%rcx
18418 - addq %rdx,%rcx
18419 - jc bad_from_user
18420 - cmpq TI_addr_limit(%rax),%rcx
18421 - ja bad_from_user
18422 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18423 - CFI_ENDPROC
18424 -ENDPROC(copy_from_user)
18425 -
18426 ENTRY(copy_user_generic)
18427 CFI_STARTPROC
18428 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18429 CFI_ENDPROC
18430 ENDPROC(copy_user_generic)
18431
18432 -ENTRY(__copy_from_user_inatomic)
18433 - CFI_STARTPROC
18434 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18435 - CFI_ENDPROC
18436 -ENDPROC(__copy_from_user_inatomic)
18437 -
18438 .section .fixup,"ax"
18439 /* must zero dest */
18440 ENTRY(bad_from_user)
18441 bad_from_user:
18442 CFI_STARTPROC
18443 + testl %edx,%edx
18444 + js bad_to_user
18445 movl %edx,%ecx
18446 xorl %eax,%eax
18447 rep
18448 diff -urNp linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S
18449 --- linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
18450 +++ linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
18451 @@ -14,6 +14,7 @@
18452 #include <asm/current.h>
18453 #include <asm/asm-offsets.h>
18454 #include <asm/thread_info.h>
18455 +#include <asm/pgtable.h>
18456
18457 .macro ALIGN_DESTINATION
18458 #ifdef FIX_ALIGNMENT
18459 @@ -50,6 +51,15 @@
18460 */
18461 ENTRY(__copy_user_nocache)
18462 CFI_STARTPROC
18463 +
18464 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18465 + mov $PAX_USER_SHADOW_BASE,%rcx
18466 + cmp %rcx,%rsi
18467 + jae 1f
18468 + add %rcx,%rsi
18469 +1:
18470 +#endif
18471 +
18472 cmpl $8,%edx
18473 jb 20f /* less then 8 bytes, go to byte copy loop */
18474 ALIGN_DESTINATION
18475 diff -urNp linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c
18476 --- linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
18477 +++ linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
18478 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
18479 len -= 2;
18480 }
18481 }
18482 +
18483 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18484 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18485 + src += PAX_USER_SHADOW_BASE;
18486 +#endif
18487 +
18488 isum = csum_partial_copy_generic((__force const void *)src,
18489 dst, len, isum, errp, NULL);
18490 if (unlikely(*errp))
18491 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
18492 }
18493
18494 *errp = 0;
18495 +
18496 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18497 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
18498 + dst += PAX_USER_SHADOW_BASE;
18499 +#endif
18500 +
18501 return csum_partial_copy_generic(src, (void __force *)dst,
18502 len, isum, NULL, errp);
18503 }
18504 diff -urNp linux-2.6.32.42/arch/x86/lib/getuser.S linux-2.6.32.42/arch/x86/lib/getuser.S
18505 --- linux-2.6.32.42/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
18506 +++ linux-2.6.32.42/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
18507 @@ -33,14 +33,35 @@
18508 #include <asm/asm-offsets.h>
18509 #include <asm/thread_info.h>
18510 #include <asm/asm.h>
18511 +#include <asm/segment.h>
18512 +#include <asm/pgtable.h>
18513 +
18514 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18515 +#define __copyuser_seg gs;
18516 +#else
18517 +#define __copyuser_seg
18518 +#endif
18519
18520 .text
18521 ENTRY(__get_user_1)
18522 CFI_STARTPROC
18523 +
18524 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18525 GET_THREAD_INFO(%_ASM_DX)
18526 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18527 jae bad_get_user
18528 -1: movzb (%_ASM_AX),%edx
18529 +
18530 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18531 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18532 + cmp %_ASM_DX,%_ASM_AX
18533 + jae 1234f
18534 + add %_ASM_DX,%_ASM_AX
18535 +1234:
18536 +#endif
18537 +
18538 +#endif
18539 +
18540 +1: __copyuser_seg movzb (%_ASM_AX),%edx
18541 xor %eax,%eax
18542 ret
18543 CFI_ENDPROC
18544 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
18545 ENTRY(__get_user_2)
18546 CFI_STARTPROC
18547 add $1,%_ASM_AX
18548 +
18549 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18550 jc bad_get_user
18551 GET_THREAD_INFO(%_ASM_DX)
18552 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18553 jae bad_get_user
18554 -2: movzwl -1(%_ASM_AX),%edx
18555 +
18556 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18557 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18558 + cmp %_ASM_DX,%_ASM_AX
18559 + jae 1234f
18560 + add %_ASM_DX,%_ASM_AX
18561 +1234:
18562 +#endif
18563 +
18564 +#endif
18565 +
18566 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
18567 xor %eax,%eax
18568 ret
18569 CFI_ENDPROC
18570 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
18571 ENTRY(__get_user_4)
18572 CFI_STARTPROC
18573 add $3,%_ASM_AX
18574 +
18575 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18576 jc bad_get_user
18577 GET_THREAD_INFO(%_ASM_DX)
18578 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18579 jae bad_get_user
18580 -3: mov -3(%_ASM_AX),%edx
18581 +
18582 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18583 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18584 + cmp %_ASM_DX,%_ASM_AX
18585 + jae 1234f
18586 + add %_ASM_DX,%_ASM_AX
18587 +1234:
18588 +#endif
18589 +
18590 +#endif
18591 +
18592 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
18593 xor %eax,%eax
18594 ret
18595 CFI_ENDPROC
18596 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
18597 GET_THREAD_INFO(%_ASM_DX)
18598 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18599 jae bad_get_user
18600 +
18601 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18602 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18603 + cmp %_ASM_DX,%_ASM_AX
18604 + jae 1234f
18605 + add %_ASM_DX,%_ASM_AX
18606 +1234:
18607 +#endif
18608 +
18609 4: movq -7(%_ASM_AX),%_ASM_DX
18610 xor %eax,%eax
18611 ret
18612 diff -urNp linux-2.6.32.42/arch/x86/lib/memcpy_64.S linux-2.6.32.42/arch/x86/lib/memcpy_64.S
18613 --- linux-2.6.32.42/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
18614 +++ linux-2.6.32.42/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
18615 @@ -128,7 +128,7 @@ ENDPROC(__memcpy)
18616 * It is also a lot simpler. Use this when possible:
18617 */
18618
18619 - .section .altinstr_replacement, "ax"
18620 + .section .altinstr_replacement, "a"
18621 1: .byte 0xeb /* jmp <disp8> */
18622 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
18623 2:
18624 diff -urNp linux-2.6.32.42/arch/x86/lib/memset_64.S linux-2.6.32.42/arch/x86/lib/memset_64.S
18625 --- linux-2.6.32.42/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
18626 +++ linux-2.6.32.42/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
18627 @@ -118,7 +118,7 @@ ENDPROC(__memset)
18628
18629 #include <asm/cpufeature.h>
18630
18631 - .section .altinstr_replacement,"ax"
18632 + .section .altinstr_replacement,"a"
18633 1: .byte 0xeb /* jmp <disp8> */
18634 .byte (memset_c - memset) - (2f - 1b) /* offset */
18635 2:
18636 diff -urNp linux-2.6.32.42/arch/x86/lib/mmx_32.c linux-2.6.32.42/arch/x86/lib/mmx_32.c
18637 --- linux-2.6.32.42/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
18638 +++ linux-2.6.32.42/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
18639 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
18640 {
18641 void *p;
18642 int i;
18643 + unsigned long cr0;
18644
18645 if (unlikely(in_interrupt()))
18646 return __memcpy(to, from, len);
18647 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
18648 kernel_fpu_begin();
18649
18650 __asm__ __volatile__ (
18651 - "1: prefetch (%0)\n" /* This set is 28 bytes */
18652 - " prefetch 64(%0)\n"
18653 - " prefetch 128(%0)\n"
18654 - " prefetch 192(%0)\n"
18655 - " prefetch 256(%0)\n"
18656 + "1: prefetch (%1)\n" /* This set is 28 bytes */
18657 + " prefetch 64(%1)\n"
18658 + " prefetch 128(%1)\n"
18659 + " prefetch 192(%1)\n"
18660 + " prefetch 256(%1)\n"
18661 "2: \n"
18662 ".section .fixup, \"ax\"\n"
18663 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18664 + "3: \n"
18665 +
18666 +#ifdef CONFIG_PAX_KERNEXEC
18667 + " movl %%cr0, %0\n"
18668 + " movl %0, %%eax\n"
18669 + " andl $0xFFFEFFFF, %%eax\n"
18670 + " movl %%eax, %%cr0\n"
18671 +#endif
18672 +
18673 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18674 +
18675 +#ifdef CONFIG_PAX_KERNEXEC
18676 + " movl %0, %%cr0\n"
18677 +#endif
18678 +
18679 " jmp 2b\n"
18680 ".previous\n"
18681 _ASM_EXTABLE(1b, 3b)
18682 - : : "r" (from));
18683 + : "=&r" (cr0) : "r" (from) : "ax");
18684
18685 for ( ; i > 5; i--) {
18686 __asm__ __volatile__ (
18687 - "1: prefetch 320(%0)\n"
18688 - "2: movq (%0), %%mm0\n"
18689 - " movq 8(%0), %%mm1\n"
18690 - " movq 16(%0), %%mm2\n"
18691 - " movq 24(%0), %%mm3\n"
18692 - " movq %%mm0, (%1)\n"
18693 - " movq %%mm1, 8(%1)\n"
18694 - " movq %%mm2, 16(%1)\n"
18695 - " movq %%mm3, 24(%1)\n"
18696 - " movq 32(%0), %%mm0\n"
18697 - " movq 40(%0), %%mm1\n"
18698 - " movq 48(%0), %%mm2\n"
18699 - " movq 56(%0), %%mm3\n"
18700 - " movq %%mm0, 32(%1)\n"
18701 - " movq %%mm1, 40(%1)\n"
18702 - " movq %%mm2, 48(%1)\n"
18703 - " movq %%mm3, 56(%1)\n"
18704 + "1: prefetch 320(%1)\n"
18705 + "2: movq (%1), %%mm0\n"
18706 + " movq 8(%1), %%mm1\n"
18707 + " movq 16(%1), %%mm2\n"
18708 + " movq 24(%1), %%mm3\n"
18709 + " movq %%mm0, (%2)\n"
18710 + " movq %%mm1, 8(%2)\n"
18711 + " movq %%mm2, 16(%2)\n"
18712 + " movq %%mm3, 24(%2)\n"
18713 + " movq 32(%1), %%mm0\n"
18714 + " movq 40(%1), %%mm1\n"
18715 + " movq 48(%1), %%mm2\n"
18716 + " movq 56(%1), %%mm3\n"
18717 + " movq %%mm0, 32(%2)\n"
18718 + " movq %%mm1, 40(%2)\n"
18719 + " movq %%mm2, 48(%2)\n"
18720 + " movq %%mm3, 56(%2)\n"
18721 ".section .fixup, \"ax\"\n"
18722 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18723 + "3:\n"
18724 +
18725 +#ifdef CONFIG_PAX_KERNEXEC
18726 + " movl %%cr0, %0\n"
18727 + " movl %0, %%eax\n"
18728 + " andl $0xFFFEFFFF, %%eax\n"
18729 + " movl %%eax, %%cr0\n"
18730 +#endif
18731 +
18732 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18733 +
18734 +#ifdef CONFIG_PAX_KERNEXEC
18735 + " movl %0, %%cr0\n"
18736 +#endif
18737 +
18738 " jmp 2b\n"
18739 ".previous\n"
18740 _ASM_EXTABLE(1b, 3b)
18741 - : : "r" (from), "r" (to) : "memory");
18742 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18743
18744 from += 64;
18745 to += 64;
18746 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
18747 static void fast_copy_page(void *to, void *from)
18748 {
18749 int i;
18750 + unsigned long cr0;
18751
18752 kernel_fpu_begin();
18753
18754 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
18755 * but that is for later. -AV
18756 */
18757 __asm__ __volatile__(
18758 - "1: prefetch (%0)\n"
18759 - " prefetch 64(%0)\n"
18760 - " prefetch 128(%0)\n"
18761 - " prefetch 192(%0)\n"
18762 - " prefetch 256(%0)\n"
18763 + "1: prefetch (%1)\n"
18764 + " prefetch 64(%1)\n"
18765 + " prefetch 128(%1)\n"
18766 + " prefetch 192(%1)\n"
18767 + " prefetch 256(%1)\n"
18768 "2: \n"
18769 ".section .fixup, \"ax\"\n"
18770 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18771 + "3: \n"
18772 +
18773 +#ifdef CONFIG_PAX_KERNEXEC
18774 + " movl %%cr0, %0\n"
18775 + " movl %0, %%eax\n"
18776 + " andl $0xFFFEFFFF, %%eax\n"
18777 + " movl %%eax, %%cr0\n"
18778 +#endif
18779 +
18780 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18781 +
18782 +#ifdef CONFIG_PAX_KERNEXEC
18783 + " movl %0, %%cr0\n"
18784 +#endif
18785 +
18786 " jmp 2b\n"
18787 ".previous\n"
18788 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
18789 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18790
18791 for (i = 0; i < (4096-320)/64; i++) {
18792 __asm__ __volatile__ (
18793 - "1: prefetch 320(%0)\n"
18794 - "2: movq (%0), %%mm0\n"
18795 - " movntq %%mm0, (%1)\n"
18796 - " movq 8(%0), %%mm1\n"
18797 - " movntq %%mm1, 8(%1)\n"
18798 - " movq 16(%0), %%mm2\n"
18799 - " movntq %%mm2, 16(%1)\n"
18800 - " movq 24(%0), %%mm3\n"
18801 - " movntq %%mm3, 24(%1)\n"
18802 - " movq 32(%0), %%mm4\n"
18803 - " movntq %%mm4, 32(%1)\n"
18804 - " movq 40(%0), %%mm5\n"
18805 - " movntq %%mm5, 40(%1)\n"
18806 - " movq 48(%0), %%mm6\n"
18807 - " movntq %%mm6, 48(%1)\n"
18808 - " movq 56(%0), %%mm7\n"
18809 - " movntq %%mm7, 56(%1)\n"
18810 + "1: prefetch 320(%1)\n"
18811 + "2: movq (%1), %%mm0\n"
18812 + " movntq %%mm0, (%2)\n"
18813 + " movq 8(%1), %%mm1\n"
18814 + " movntq %%mm1, 8(%2)\n"
18815 + " movq 16(%1), %%mm2\n"
18816 + " movntq %%mm2, 16(%2)\n"
18817 + " movq 24(%1), %%mm3\n"
18818 + " movntq %%mm3, 24(%2)\n"
18819 + " movq 32(%1), %%mm4\n"
18820 + " movntq %%mm4, 32(%2)\n"
18821 + " movq 40(%1), %%mm5\n"
18822 + " movntq %%mm5, 40(%2)\n"
18823 + " movq 48(%1), %%mm6\n"
18824 + " movntq %%mm6, 48(%2)\n"
18825 + " movq 56(%1), %%mm7\n"
18826 + " movntq %%mm7, 56(%2)\n"
18827 ".section .fixup, \"ax\"\n"
18828 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18829 + "3:\n"
18830 +
18831 +#ifdef CONFIG_PAX_KERNEXEC
18832 + " movl %%cr0, %0\n"
18833 + " movl %0, %%eax\n"
18834 + " andl $0xFFFEFFFF, %%eax\n"
18835 + " movl %%eax, %%cr0\n"
18836 +#endif
18837 +
18838 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18839 +
18840 +#ifdef CONFIG_PAX_KERNEXEC
18841 + " movl %0, %%cr0\n"
18842 +#endif
18843 +
18844 " jmp 2b\n"
18845 ".previous\n"
18846 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
18847 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18848
18849 from += 64;
18850 to += 64;
18851 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
18852 static void fast_copy_page(void *to, void *from)
18853 {
18854 int i;
18855 + unsigned long cr0;
18856
18857 kernel_fpu_begin();
18858
18859 __asm__ __volatile__ (
18860 - "1: prefetch (%0)\n"
18861 - " prefetch 64(%0)\n"
18862 - " prefetch 128(%0)\n"
18863 - " prefetch 192(%0)\n"
18864 - " prefetch 256(%0)\n"
18865 + "1: prefetch (%1)\n"
18866 + " prefetch 64(%1)\n"
18867 + " prefetch 128(%1)\n"
18868 + " prefetch 192(%1)\n"
18869 + " prefetch 256(%1)\n"
18870 "2: \n"
18871 ".section .fixup, \"ax\"\n"
18872 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18873 + "3: \n"
18874 +
18875 +#ifdef CONFIG_PAX_KERNEXEC
18876 + " movl %%cr0, %0\n"
18877 + " movl %0, %%eax\n"
18878 + " andl $0xFFFEFFFF, %%eax\n"
18879 + " movl %%eax, %%cr0\n"
18880 +#endif
18881 +
18882 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18883 +
18884 +#ifdef CONFIG_PAX_KERNEXEC
18885 + " movl %0, %%cr0\n"
18886 +#endif
18887 +
18888 " jmp 2b\n"
18889 ".previous\n"
18890 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
18891 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18892
18893 for (i = 0; i < 4096/64; i++) {
18894 __asm__ __volatile__ (
18895 - "1: prefetch 320(%0)\n"
18896 - "2: movq (%0), %%mm0\n"
18897 - " movq 8(%0), %%mm1\n"
18898 - " movq 16(%0), %%mm2\n"
18899 - " movq 24(%0), %%mm3\n"
18900 - " movq %%mm0, (%1)\n"
18901 - " movq %%mm1, 8(%1)\n"
18902 - " movq %%mm2, 16(%1)\n"
18903 - " movq %%mm3, 24(%1)\n"
18904 - " movq 32(%0), %%mm0\n"
18905 - " movq 40(%0), %%mm1\n"
18906 - " movq 48(%0), %%mm2\n"
18907 - " movq 56(%0), %%mm3\n"
18908 - " movq %%mm0, 32(%1)\n"
18909 - " movq %%mm1, 40(%1)\n"
18910 - " movq %%mm2, 48(%1)\n"
18911 - " movq %%mm3, 56(%1)\n"
18912 + "1: prefetch 320(%1)\n"
18913 + "2: movq (%1), %%mm0\n"
18914 + " movq 8(%1), %%mm1\n"
18915 + " movq 16(%1), %%mm2\n"
18916 + " movq 24(%1), %%mm3\n"
18917 + " movq %%mm0, (%2)\n"
18918 + " movq %%mm1, 8(%2)\n"
18919 + " movq %%mm2, 16(%2)\n"
18920 + " movq %%mm3, 24(%2)\n"
18921 + " movq 32(%1), %%mm0\n"
18922 + " movq 40(%1), %%mm1\n"
18923 + " movq 48(%1), %%mm2\n"
18924 + " movq 56(%1), %%mm3\n"
18925 + " movq %%mm0, 32(%2)\n"
18926 + " movq %%mm1, 40(%2)\n"
18927 + " movq %%mm2, 48(%2)\n"
18928 + " movq %%mm3, 56(%2)\n"
18929 ".section .fixup, \"ax\"\n"
18930 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18931 + "3:\n"
18932 +
18933 +#ifdef CONFIG_PAX_KERNEXEC
18934 + " movl %%cr0, %0\n"
18935 + " movl %0, %%eax\n"
18936 + " andl $0xFFFEFFFF, %%eax\n"
18937 + " movl %%eax, %%cr0\n"
18938 +#endif
18939 +
18940 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18941 +
18942 +#ifdef CONFIG_PAX_KERNEXEC
18943 + " movl %0, %%cr0\n"
18944 +#endif
18945 +
18946 " jmp 2b\n"
18947 ".previous\n"
18948 _ASM_EXTABLE(1b, 3b)
18949 - : : "r" (from), "r" (to) : "memory");
18950 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18951
18952 from += 64;
18953 to += 64;
18954 diff -urNp linux-2.6.32.42/arch/x86/lib/putuser.S linux-2.6.32.42/arch/x86/lib/putuser.S
18955 --- linux-2.6.32.42/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
18956 +++ linux-2.6.32.42/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
18957 @@ -15,7 +15,8 @@
18958 #include <asm/thread_info.h>
18959 #include <asm/errno.h>
18960 #include <asm/asm.h>
18961 -
18962 +#include <asm/segment.h>
18963 +#include <asm/pgtable.h>
18964
18965 /*
18966 * __put_user_X
18967 @@ -29,52 +30,119 @@
18968 * as they get called from within inline assembly.
18969 */
18970
18971 -#define ENTER CFI_STARTPROC ; \
18972 - GET_THREAD_INFO(%_ASM_BX)
18973 +#define ENTER CFI_STARTPROC
18974 #define EXIT ret ; \
18975 CFI_ENDPROC
18976
18977 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18978 +#define _DEST %_ASM_CX,%_ASM_BX
18979 +#else
18980 +#define _DEST %_ASM_CX
18981 +#endif
18982 +
18983 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18984 +#define __copyuser_seg gs;
18985 +#else
18986 +#define __copyuser_seg
18987 +#endif
18988 +
18989 .text
18990 ENTRY(__put_user_1)
18991 ENTER
18992 +
18993 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18994 + GET_THREAD_INFO(%_ASM_BX)
18995 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
18996 jae bad_put_user
18997 -1: movb %al,(%_ASM_CX)
18998 +
18999 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19000 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19001 + cmp %_ASM_BX,%_ASM_CX
19002 + jb 1234f
19003 + xor %ebx,%ebx
19004 +1234:
19005 +#endif
19006 +
19007 +#endif
19008 +
19009 +1: __copyuser_seg movb %al,(_DEST)
19010 xor %eax,%eax
19011 EXIT
19012 ENDPROC(__put_user_1)
19013
19014 ENTRY(__put_user_2)
19015 ENTER
19016 +
19017 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19018 + GET_THREAD_INFO(%_ASM_BX)
19019 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19020 sub $1,%_ASM_BX
19021 cmp %_ASM_BX,%_ASM_CX
19022 jae bad_put_user
19023 -2: movw %ax,(%_ASM_CX)
19024 +
19025 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19026 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19027 + cmp %_ASM_BX,%_ASM_CX
19028 + jb 1234f
19029 + xor %ebx,%ebx
19030 +1234:
19031 +#endif
19032 +
19033 +#endif
19034 +
19035 +2: __copyuser_seg movw %ax,(_DEST)
19036 xor %eax,%eax
19037 EXIT
19038 ENDPROC(__put_user_2)
19039
19040 ENTRY(__put_user_4)
19041 ENTER
19042 +
19043 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19044 + GET_THREAD_INFO(%_ASM_BX)
19045 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19046 sub $3,%_ASM_BX
19047 cmp %_ASM_BX,%_ASM_CX
19048 jae bad_put_user
19049 -3: movl %eax,(%_ASM_CX)
19050 +
19051 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19052 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19053 + cmp %_ASM_BX,%_ASM_CX
19054 + jb 1234f
19055 + xor %ebx,%ebx
19056 +1234:
19057 +#endif
19058 +
19059 +#endif
19060 +
19061 +3: __copyuser_seg movl %eax,(_DEST)
19062 xor %eax,%eax
19063 EXIT
19064 ENDPROC(__put_user_4)
19065
19066 ENTRY(__put_user_8)
19067 ENTER
19068 +
19069 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19070 + GET_THREAD_INFO(%_ASM_BX)
19071 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19072 sub $7,%_ASM_BX
19073 cmp %_ASM_BX,%_ASM_CX
19074 jae bad_put_user
19075 -4: mov %_ASM_AX,(%_ASM_CX)
19076 +
19077 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19078 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19079 + cmp %_ASM_BX,%_ASM_CX
19080 + jb 1234f
19081 + xor %ebx,%ebx
19082 +1234:
19083 +#endif
19084 +
19085 +#endif
19086 +
19087 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
19088 #ifdef CONFIG_X86_32
19089 -5: movl %edx,4(%_ASM_CX)
19090 +5: __copyuser_seg movl %edx,4(_DEST)
19091 #endif
19092 xor %eax,%eax
19093 EXIT
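
Annotation: two UDEREF mechanisms show up in the __put_user_* changes above. On i386 with CONFIG_PAX_MEMORY_UDEREF the store itself carries a %gs segment override (__copyuser_seg expands to "gs;") and the TI_addr_limit comparison is compiled out; on amd64 the destination becomes the base+index pair (%_ASM_CX,%_ASM_BX), where %rbx holds either PAX_USER_SHADOW_BASE (pointer below the shadow base, the jb 1234f path) or zero (the xor). A compilable sketch of just that offset decision; the shadow-base value below is illustrative, the real constant comes from asm/pgtable.h elsewhere in this patch:

	#include <stdio.h>
	#include <stdint.h>

	/* Illustrative value only; the real PAX_USER_SHADOW_BASE is defined
	 * in asm/pgtable.h by other parts of this patch. */
	#define PAX_USER_SHADOW_BASE 0x1000000000000ULL

	/* Mirrors the amd64 logic in __put_user_*: if the user pointer is
	 * below the shadow base it is accessed at (ptr + shadow base),
	 * otherwise the index register is zeroed (the xor %ebx,%ebx path). */
	static uint64_t uderef_offset(uint64_t uaddr)
	{
		return uaddr < PAX_USER_SHADOW_BASE ? PAX_USER_SHADOW_BASE : 0;
	}

	int main(void)
	{
		uint64_t p = 0x00007fffdeadb000ULL;	/* typical userland address */

		printf("access at %#llx\n",
		       (unsigned long long)(p + uderef_offset(p)));
		return 0;
	}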
19094 diff -urNp linux-2.6.32.42/arch/x86/lib/usercopy_32.c linux-2.6.32.42/arch/x86/lib/usercopy_32.c
19095 --- linux-2.6.32.42/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
19096 +++ linux-2.6.32.42/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
19097 @@ -43,7 +43,7 @@ do { \
19098 __asm__ __volatile__( \
19099 " testl %1,%1\n" \
19100 " jz 2f\n" \
19101 - "0: lodsb\n" \
19102 + "0: "__copyuser_seg"lodsb\n" \
19103 " stosb\n" \
19104 " testb %%al,%%al\n" \
19105 " jz 1f\n" \
19106 @@ -128,10 +128,12 @@ do { \
19107 int __d0; \
19108 might_fault(); \
19109 __asm__ __volatile__( \
19110 + __COPYUSER_SET_ES \
19111 "0: rep; stosl\n" \
19112 " movl %2,%0\n" \
19113 "1: rep; stosb\n" \
19114 "2:\n" \
19115 + __COPYUSER_RESTORE_ES \
19116 ".section .fixup,\"ax\"\n" \
19117 "3: lea 0(%2,%0,4),%0\n" \
19118 " jmp 2b\n" \
19119 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19120 might_fault();
19121
19122 __asm__ __volatile__(
19123 + __COPYUSER_SET_ES
19124 " testl %0, %0\n"
19125 " jz 3f\n"
19126 " andl %0,%%ecx\n"
19127 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19128 " subl %%ecx,%0\n"
19129 " addl %0,%%eax\n"
19130 "1:\n"
19131 + __COPYUSER_RESTORE_ES
19132 ".section .fixup,\"ax\"\n"
19133 "2: xorl %%eax,%%eax\n"
19134 " jmp 1b\n"
19135 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19136
19137 #ifdef CONFIG_X86_INTEL_USERCOPY
19138 static unsigned long
19139 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
19140 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19141 {
19142 int d0, d1;
19143 __asm__ __volatile__(
19144 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19145 " .align 2,0x90\n"
19146 "3: movl 0(%4), %%eax\n"
19147 "4: movl 4(%4), %%edx\n"
19148 - "5: movl %%eax, 0(%3)\n"
19149 - "6: movl %%edx, 4(%3)\n"
19150 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19151 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19152 "7: movl 8(%4), %%eax\n"
19153 "8: movl 12(%4),%%edx\n"
19154 - "9: movl %%eax, 8(%3)\n"
19155 - "10: movl %%edx, 12(%3)\n"
19156 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19157 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19158 "11: movl 16(%4), %%eax\n"
19159 "12: movl 20(%4), %%edx\n"
19160 - "13: movl %%eax, 16(%3)\n"
19161 - "14: movl %%edx, 20(%3)\n"
19162 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19163 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19164 "15: movl 24(%4), %%eax\n"
19165 "16: movl 28(%4), %%edx\n"
19166 - "17: movl %%eax, 24(%3)\n"
19167 - "18: movl %%edx, 28(%3)\n"
19168 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19169 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19170 "19: movl 32(%4), %%eax\n"
19171 "20: movl 36(%4), %%edx\n"
19172 - "21: movl %%eax, 32(%3)\n"
19173 - "22: movl %%edx, 36(%3)\n"
19174 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19175 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19176 "23: movl 40(%4), %%eax\n"
19177 "24: movl 44(%4), %%edx\n"
19178 - "25: movl %%eax, 40(%3)\n"
19179 - "26: movl %%edx, 44(%3)\n"
19180 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19181 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19182 "27: movl 48(%4), %%eax\n"
19183 "28: movl 52(%4), %%edx\n"
19184 - "29: movl %%eax, 48(%3)\n"
19185 - "30: movl %%edx, 52(%3)\n"
19186 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19187 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19188 "31: movl 56(%4), %%eax\n"
19189 "32: movl 60(%4), %%edx\n"
19190 - "33: movl %%eax, 56(%3)\n"
19191 - "34: movl %%edx, 60(%3)\n"
19192 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19193 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19194 " addl $-64, %0\n"
19195 " addl $64, %4\n"
19196 " addl $64, %3\n"
19197 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19198 " shrl $2, %0\n"
19199 " andl $3, %%eax\n"
19200 " cld\n"
19201 + __COPYUSER_SET_ES
19202 "99: rep; movsl\n"
19203 "36: movl %%eax, %0\n"
19204 "37: rep; movsb\n"
19205 "100:\n"
19206 + __COPYUSER_RESTORE_ES
19207 + ".section .fixup,\"ax\"\n"
19208 + "101: lea 0(%%eax,%0,4),%0\n"
19209 + " jmp 100b\n"
19210 + ".previous\n"
19211 + ".section __ex_table,\"a\"\n"
19212 + " .align 4\n"
19213 + " .long 1b,100b\n"
19214 + " .long 2b,100b\n"
19215 + " .long 3b,100b\n"
19216 + " .long 4b,100b\n"
19217 + " .long 5b,100b\n"
19218 + " .long 6b,100b\n"
19219 + " .long 7b,100b\n"
19220 + " .long 8b,100b\n"
19221 + " .long 9b,100b\n"
19222 + " .long 10b,100b\n"
19223 + " .long 11b,100b\n"
19224 + " .long 12b,100b\n"
19225 + " .long 13b,100b\n"
19226 + " .long 14b,100b\n"
19227 + " .long 15b,100b\n"
19228 + " .long 16b,100b\n"
19229 + " .long 17b,100b\n"
19230 + " .long 18b,100b\n"
19231 + " .long 19b,100b\n"
19232 + " .long 20b,100b\n"
19233 + " .long 21b,100b\n"
19234 + " .long 22b,100b\n"
19235 + " .long 23b,100b\n"
19236 + " .long 24b,100b\n"
19237 + " .long 25b,100b\n"
19238 + " .long 26b,100b\n"
19239 + " .long 27b,100b\n"
19240 + " .long 28b,100b\n"
19241 + " .long 29b,100b\n"
19242 + " .long 30b,100b\n"
19243 + " .long 31b,100b\n"
19244 + " .long 32b,100b\n"
19245 + " .long 33b,100b\n"
19246 + " .long 34b,100b\n"
19247 + " .long 35b,100b\n"
19248 + " .long 36b,100b\n"
19249 + " .long 37b,100b\n"
19250 + " .long 99b,101b\n"
19251 + ".previous"
19252 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
19253 + : "1"(to), "2"(from), "0"(size)
19254 + : "eax", "edx", "memory");
19255 + return size;
19256 +}
19257 +
19258 +static unsigned long
19259 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
19260 +{
19261 + int d0, d1;
19262 + __asm__ __volatile__(
19263 + " .align 2,0x90\n"
19264 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
19265 + " cmpl $67, %0\n"
19266 + " jbe 3f\n"
19267 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
19268 + " .align 2,0x90\n"
19269 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
19270 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
19271 + "5: movl %%eax, 0(%3)\n"
19272 + "6: movl %%edx, 4(%3)\n"
19273 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
19274 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
19275 + "9: movl %%eax, 8(%3)\n"
19276 + "10: movl %%edx, 12(%3)\n"
19277 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
19278 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
19279 + "13: movl %%eax, 16(%3)\n"
19280 + "14: movl %%edx, 20(%3)\n"
19281 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
19282 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
19283 + "17: movl %%eax, 24(%3)\n"
19284 + "18: movl %%edx, 28(%3)\n"
19285 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
19286 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
19287 + "21: movl %%eax, 32(%3)\n"
19288 + "22: movl %%edx, 36(%3)\n"
19289 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
19290 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
19291 + "25: movl %%eax, 40(%3)\n"
19292 + "26: movl %%edx, 44(%3)\n"
19293 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
19294 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
19295 + "29: movl %%eax, 48(%3)\n"
19296 + "30: movl %%edx, 52(%3)\n"
19297 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
19298 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
19299 + "33: movl %%eax, 56(%3)\n"
19300 + "34: movl %%edx, 60(%3)\n"
19301 + " addl $-64, %0\n"
19302 + " addl $64, %4\n"
19303 + " addl $64, %3\n"
19304 + " cmpl $63, %0\n"
19305 + " ja 1b\n"
19306 + "35: movl %0, %%eax\n"
19307 + " shrl $2, %0\n"
19308 + " andl $3, %%eax\n"
19309 + " cld\n"
19310 + "99: rep; "__copyuser_seg" movsl\n"
19311 + "36: movl %%eax, %0\n"
19312 + "37: rep; "__copyuser_seg" movsb\n"
19313 + "100:\n"
19314 ".section .fixup,\"ax\"\n"
19315 "101: lea 0(%%eax,%0,4),%0\n"
19316 " jmp 100b\n"
19317 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
19318 int d0, d1;
19319 __asm__ __volatile__(
19320 " .align 2,0x90\n"
19321 - "0: movl 32(%4), %%eax\n"
19322 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19323 " cmpl $67, %0\n"
19324 " jbe 2f\n"
19325 - "1: movl 64(%4), %%eax\n"
19326 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19327 " .align 2,0x90\n"
19328 - "2: movl 0(%4), %%eax\n"
19329 - "21: movl 4(%4), %%edx\n"
19330 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19331 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19332 " movl %%eax, 0(%3)\n"
19333 " movl %%edx, 4(%3)\n"
19334 - "3: movl 8(%4), %%eax\n"
19335 - "31: movl 12(%4),%%edx\n"
19336 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19337 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19338 " movl %%eax, 8(%3)\n"
19339 " movl %%edx, 12(%3)\n"
19340 - "4: movl 16(%4), %%eax\n"
19341 - "41: movl 20(%4), %%edx\n"
19342 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19343 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19344 " movl %%eax, 16(%3)\n"
19345 " movl %%edx, 20(%3)\n"
19346 - "10: movl 24(%4), %%eax\n"
19347 - "51: movl 28(%4), %%edx\n"
19348 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19349 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19350 " movl %%eax, 24(%3)\n"
19351 " movl %%edx, 28(%3)\n"
19352 - "11: movl 32(%4), %%eax\n"
19353 - "61: movl 36(%4), %%edx\n"
19354 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19355 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19356 " movl %%eax, 32(%3)\n"
19357 " movl %%edx, 36(%3)\n"
19358 - "12: movl 40(%4), %%eax\n"
19359 - "71: movl 44(%4), %%edx\n"
19360 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19361 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19362 " movl %%eax, 40(%3)\n"
19363 " movl %%edx, 44(%3)\n"
19364 - "13: movl 48(%4), %%eax\n"
19365 - "81: movl 52(%4), %%edx\n"
19366 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19367 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19368 " movl %%eax, 48(%3)\n"
19369 " movl %%edx, 52(%3)\n"
19370 - "14: movl 56(%4), %%eax\n"
19371 - "91: movl 60(%4), %%edx\n"
19372 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19373 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19374 " movl %%eax, 56(%3)\n"
19375 " movl %%edx, 60(%3)\n"
19376 " addl $-64, %0\n"
19377 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
19378 " shrl $2, %0\n"
19379 " andl $3, %%eax\n"
19380 " cld\n"
19381 - "6: rep; movsl\n"
19382 + "6: rep; "__copyuser_seg" movsl\n"
19383 " movl %%eax,%0\n"
19384 - "7: rep; movsb\n"
19385 + "7: rep; "__copyuser_seg" movsb\n"
19386 "8:\n"
19387 ".section .fixup,\"ax\"\n"
19388 "9: lea 0(%%eax,%0,4),%0\n"
19389 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
19390
19391 __asm__ __volatile__(
19392 " .align 2,0x90\n"
19393 - "0: movl 32(%4), %%eax\n"
19394 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19395 " cmpl $67, %0\n"
19396 " jbe 2f\n"
19397 - "1: movl 64(%4), %%eax\n"
19398 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19399 " .align 2,0x90\n"
19400 - "2: movl 0(%4), %%eax\n"
19401 - "21: movl 4(%4), %%edx\n"
19402 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19403 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19404 " movnti %%eax, 0(%3)\n"
19405 " movnti %%edx, 4(%3)\n"
19406 - "3: movl 8(%4), %%eax\n"
19407 - "31: movl 12(%4),%%edx\n"
19408 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19409 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19410 " movnti %%eax, 8(%3)\n"
19411 " movnti %%edx, 12(%3)\n"
19412 - "4: movl 16(%4), %%eax\n"
19413 - "41: movl 20(%4), %%edx\n"
19414 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19415 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19416 " movnti %%eax, 16(%3)\n"
19417 " movnti %%edx, 20(%3)\n"
19418 - "10: movl 24(%4), %%eax\n"
19419 - "51: movl 28(%4), %%edx\n"
19420 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19421 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19422 " movnti %%eax, 24(%3)\n"
19423 " movnti %%edx, 28(%3)\n"
19424 - "11: movl 32(%4), %%eax\n"
19425 - "61: movl 36(%4), %%edx\n"
19426 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19427 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19428 " movnti %%eax, 32(%3)\n"
19429 " movnti %%edx, 36(%3)\n"
19430 - "12: movl 40(%4), %%eax\n"
19431 - "71: movl 44(%4), %%edx\n"
19432 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19433 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19434 " movnti %%eax, 40(%3)\n"
19435 " movnti %%edx, 44(%3)\n"
19436 - "13: movl 48(%4), %%eax\n"
19437 - "81: movl 52(%4), %%edx\n"
19438 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19439 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19440 " movnti %%eax, 48(%3)\n"
19441 " movnti %%edx, 52(%3)\n"
19442 - "14: movl 56(%4), %%eax\n"
19443 - "91: movl 60(%4), %%edx\n"
19444 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19445 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19446 " movnti %%eax, 56(%3)\n"
19447 " movnti %%edx, 60(%3)\n"
19448 " addl $-64, %0\n"
19449 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
19450 " shrl $2, %0\n"
19451 " andl $3, %%eax\n"
19452 " cld\n"
19453 - "6: rep; movsl\n"
19454 + "6: rep; "__copyuser_seg" movsl\n"
19455 " movl %%eax,%0\n"
19456 - "7: rep; movsb\n"
19457 + "7: rep; "__copyuser_seg" movsb\n"
19458 "8:\n"
19459 ".section .fixup,\"ax\"\n"
19460 "9: lea 0(%%eax,%0,4),%0\n"
19461 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
19462
19463 __asm__ __volatile__(
19464 " .align 2,0x90\n"
19465 - "0: movl 32(%4), %%eax\n"
19466 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19467 " cmpl $67, %0\n"
19468 " jbe 2f\n"
19469 - "1: movl 64(%4), %%eax\n"
19470 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19471 " .align 2,0x90\n"
19472 - "2: movl 0(%4), %%eax\n"
19473 - "21: movl 4(%4), %%edx\n"
19474 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19475 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19476 " movnti %%eax, 0(%3)\n"
19477 " movnti %%edx, 4(%3)\n"
19478 - "3: movl 8(%4), %%eax\n"
19479 - "31: movl 12(%4),%%edx\n"
19480 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19481 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19482 " movnti %%eax, 8(%3)\n"
19483 " movnti %%edx, 12(%3)\n"
19484 - "4: movl 16(%4), %%eax\n"
19485 - "41: movl 20(%4), %%edx\n"
19486 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19487 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19488 " movnti %%eax, 16(%3)\n"
19489 " movnti %%edx, 20(%3)\n"
19490 - "10: movl 24(%4), %%eax\n"
19491 - "51: movl 28(%4), %%edx\n"
19492 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19493 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19494 " movnti %%eax, 24(%3)\n"
19495 " movnti %%edx, 28(%3)\n"
19496 - "11: movl 32(%4), %%eax\n"
19497 - "61: movl 36(%4), %%edx\n"
19498 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19499 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19500 " movnti %%eax, 32(%3)\n"
19501 " movnti %%edx, 36(%3)\n"
19502 - "12: movl 40(%4), %%eax\n"
19503 - "71: movl 44(%4), %%edx\n"
19504 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19505 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19506 " movnti %%eax, 40(%3)\n"
19507 " movnti %%edx, 44(%3)\n"
19508 - "13: movl 48(%4), %%eax\n"
19509 - "81: movl 52(%4), %%edx\n"
19510 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19511 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19512 " movnti %%eax, 48(%3)\n"
19513 " movnti %%edx, 52(%3)\n"
19514 - "14: movl 56(%4), %%eax\n"
19515 - "91: movl 60(%4), %%edx\n"
19516 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19517 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19518 " movnti %%eax, 56(%3)\n"
19519 " movnti %%edx, 60(%3)\n"
19520 " addl $-64, %0\n"
19521 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
19522 " shrl $2, %0\n"
19523 " andl $3, %%eax\n"
19524 " cld\n"
19525 - "6: rep; movsl\n"
19526 + "6: rep; "__copyuser_seg" movsl\n"
19527 " movl %%eax,%0\n"
19528 - "7: rep; movsb\n"
19529 + "7: rep; "__copyuser_seg" movsb\n"
19530 "8:\n"
19531 ".section .fixup,\"ax\"\n"
19532 "9: lea 0(%%eax,%0,4),%0\n"
19533 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
19534 */
19535 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
19536 unsigned long size);
19537 -unsigned long __copy_user_intel(void __user *to, const void *from,
19538 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
19539 + unsigned long size);
19540 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
19541 unsigned long size);
19542 unsigned long __copy_user_zeroing_intel_nocache(void *to,
19543 const void __user *from, unsigned long size);
19544 #endif /* CONFIG_X86_INTEL_USERCOPY */
19545
19546 /* Generic arbitrary sized copy. */
19547 -#define __copy_user(to, from, size) \
19548 +#define __copy_user(to, from, size, prefix, set, restore) \
19549 do { \
19550 int __d0, __d1, __d2; \
19551 __asm__ __volatile__( \
19552 + set \
19553 " cmp $7,%0\n" \
19554 " jbe 1f\n" \
19555 " movl %1,%0\n" \
19556 " negl %0\n" \
19557 " andl $7,%0\n" \
19558 " subl %0,%3\n" \
19559 - "4: rep; movsb\n" \
19560 + "4: rep; "prefix"movsb\n" \
19561 " movl %3,%0\n" \
19562 " shrl $2,%0\n" \
19563 " andl $3,%3\n" \
19564 " .align 2,0x90\n" \
19565 - "0: rep; movsl\n" \
19566 + "0: rep; "prefix"movsl\n" \
19567 " movl %3,%0\n" \
19568 - "1: rep; movsb\n" \
19569 + "1: rep; "prefix"movsb\n" \
19570 "2:\n" \
19571 + restore \
19572 ".section .fixup,\"ax\"\n" \
19573 "5: addl %3,%0\n" \
19574 " jmp 2b\n" \
19575 @@ -682,14 +799,14 @@ do { \
19576 " negl %0\n" \
19577 " andl $7,%0\n" \
19578 " subl %0,%3\n" \
19579 - "4: rep; movsb\n" \
19580 + "4: rep; "__copyuser_seg"movsb\n" \
19581 " movl %3,%0\n" \
19582 " shrl $2,%0\n" \
19583 " andl $3,%3\n" \
19584 " .align 2,0x90\n" \
19585 - "0: rep; movsl\n" \
19586 + "0: rep; "__copyuser_seg"movsl\n" \
19587 " movl %3,%0\n" \
19588 - "1: rep; movsb\n" \
19589 + "1: rep; "__copyuser_seg"movsb\n" \
19590 "2:\n" \
19591 ".section .fixup,\"ax\"\n" \
19592 "5: addl %3,%0\n" \
19593 @@ -775,9 +892,9 @@ survive:
19594 }
19595 #endif
19596 if (movsl_is_ok(to, from, n))
19597 - __copy_user(to, from, n);
19598 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
19599 else
19600 - n = __copy_user_intel(to, from, n);
19601 + n = __generic_copy_to_user_intel(to, from, n);
19602 return n;
19603 }
19604 EXPORT_SYMBOL(__copy_to_user_ll);
19605 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
19606 unsigned long n)
19607 {
19608 if (movsl_is_ok(to, from, n))
19609 - __copy_user(to, from, n);
19610 + __copy_user(to, from, n, __copyuser_seg, "", "");
19611 else
19612 - n = __copy_user_intel((void __user *)to,
19613 - (const void *)from, n);
19614 + n = __generic_copy_from_user_intel(to, from, n);
19615 return n;
19616 }
19617 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
19618 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
19619 if (n > 64 && cpu_has_xmm2)
19620 n = __copy_user_intel_nocache(to, from, n);
19621 else
19622 - __copy_user(to, from, n);
19623 + __copy_user(to, from, n, __copyuser_seg, "", "");
19624 #else
19625 - __copy_user(to, from, n);
19626 + __copy_user(to, from, n, __copyuser_seg, "", "");
19627 #endif
19628 return n;
19629 }
19630 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
19631
19632 -/**
19633 - * copy_to_user: - Copy a block of data into user space.
19634 - * @to: Destination address, in user space.
19635 - * @from: Source address, in kernel space.
19636 - * @n: Number of bytes to copy.
19637 - *
19638 - * Context: User context only. This function may sleep.
19639 - *
19640 - * Copy data from kernel space to user space.
19641 - *
19642 - * Returns number of bytes that could not be copied.
19643 - * On success, this will be zero.
19644 - */
19645 -unsigned long
19646 -copy_to_user(void __user *to, const void *from, unsigned long n)
19647 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19648 +void __set_fs(mm_segment_t x)
19649 {
19650 - if (access_ok(VERIFY_WRITE, to, n))
19651 - n = __copy_to_user(to, from, n);
19652 - return n;
19653 + switch (x.seg) {
19654 + case 0:
19655 + loadsegment(gs, 0);
19656 + break;
19657 + case TASK_SIZE_MAX:
19658 + loadsegment(gs, __USER_DS);
19659 + break;
19660 + case -1UL:
19661 + loadsegment(gs, __KERNEL_DS);
19662 + break;
19663 + default:
19664 + BUG();
19665 + }
19666 + return;
19667 }
19668 -EXPORT_SYMBOL(copy_to_user);
19669 +EXPORT_SYMBOL(__set_fs);
19670
19671 -/**
19672 - * copy_from_user: - Copy a block of data from user space.
19673 - * @to: Destination address, in kernel space.
19674 - * @from: Source address, in user space.
19675 - * @n: Number of bytes to copy.
19676 - *
19677 - * Context: User context only. This function may sleep.
19678 - *
19679 - * Copy data from user space to kernel space.
19680 - *
19681 - * Returns number of bytes that could not be copied.
19682 - * On success, this will be zero.
19683 - *
19684 - * If some data could not be copied, this function will pad the copied
19685 - * data to the requested size using zero bytes.
19686 - */
19687 -unsigned long
19688 -copy_from_user(void *to, const void __user *from, unsigned long n)
19689 +void set_fs(mm_segment_t x)
19690 {
19691 - if (access_ok(VERIFY_READ, from, n))
19692 - n = __copy_from_user(to, from, n);
19693 - else
19694 - memset(to, 0, n);
19695 - return n;
19696 + current_thread_info()->addr_limit = x;
19697 + __set_fs(x);
19698 }
19699 -EXPORT_SYMBOL(copy_from_user);
19700 +EXPORT_SYMBOL(set_fs);
19701 +#endif
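
Annotation: in usercopy_32.c the generic __copy_user() macro now takes (prefix, set, restore) so one body serves both directions: __copy_to_user_ll() passes ("", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES), so its rep movs/stosb run between those two macros (defined elsewhere in this patch), while the from-user paths pass (__copyuser_seg, "", ""), so every string move carries the %gs override. The new __set_fs()/set_fs() pair keeps that %gs selector in step with the thread's address limit. A compilable sketch of just the limit-to-selector mapping; the enum values stand in for the real selectors from asm/segment.h:

	#include <stdio.h>

	/* Stand-ins for the segment selectors loaded in __set_fs(); the real
	 * values come from asm/segment.h. */
	enum seg { SEG_NULL, SEG_USER_DS, SEG_KERNEL_DS };

	/* Mirrors the switch in __set_fs(): seg 0 -> null selector,
	 * TASK_SIZE_MAX -> user data segment, -1UL (KERNEL_DS) -> kernel
	 * data segment; anything else is treated as a bug. */
	static enum seg gs_for_limit(unsigned long limit, unsigned long task_size_max)
	{
		if (limit == 0)
			return SEG_NULL;
		if (limit == task_size_max)
			return SEG_USER_DS;
		if (limit == (unsigned long)-1)
			return SEG_KERNEL_DS;
		return SEG_NULL;	/* the kernel BUG()s here */
	}

	int main(void)
	{
		unsigned long task_size_max = 0xC0000000UL;	/* illustrative 3G split */

		printf("USER_DS   -> %d\n", gs_for_limit(task_size_max, task_size_max));
		printf("KERNEL_DS -> %d\n", gs_for_limit(-1UL, task_size_max));
		return 0;
	}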
19702 diff -urNp linux-2.6.32.42/arch/x86/lib/usercopy_64.c linux-2.6.32.42/arch/x86/lib/usercopy_64.c
19703 --- linux-2.6.32.42/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
19704 +++ linux-2.6.32.42/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
19705 @@ -42,6 +42,12 @@ long
19706 __strncpy_from_user(char *dst, const char __user *src, long count)
19707 {
19708 long res;
19709 +
19710 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19711 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19712 + src += PAX_USER_SHADOW_BASE;
19713 +#endif
19714 +
19715 __do_strncpy_from_user(dst, src, count, res);
19716 return res;
19717 }
19718 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
19719 {
19720 long __d0;
19721 might_fault();
19722 +
19723 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19724 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
19725 + addr += PAX_USER_SHADOW_BASE;
19726 +#endif
19727 +
19728 /* no memory constraint because it doesn't change any memory gcc knows
19729 about */
19730 asm volatile(
19731 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
19732
19733 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
19734 {
19735 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19736 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19737 +
19738 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19739 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
19740 + to += PAX_USER_SHADOW_BASE;
19741 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
19742 + from += PAX_USER_SHADOW_BASE;
19743 +#endif
19744 +
19745 return copy_user_generic((__force void *)to, (__force void *)from, len);
19746 - }
19747 - return len;
19748 + }
19749 + return len;
19750 }
19751 EXPORT_SYMBOL(copy_in_user);
19752
19753 diff -urNp linux-2.6.32.42/arch/x86/Makefile linux-2.6.32.42/arch/x86/Makefile
19754 --- linux-2.6.32.42/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
19755 +++ linux-2.6.32.42/arch/x86/Makefile 2011-04-17 15:56:46.000000000 -0400
19756 @@ -189,3 +189,12 @@ define archhelp
19757 echo ' FDARGS="..." arguments for the booted kernel'
19758 echo ' FDINITRD=file initrd for the booted kernel'
19759 endef
19760 +
19761 +define OLD_LD
19762 +
19763 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
19764 +*** Please upgrade your binutils to 2.18 or newer
19765 +endef
19766 +
19767 +archprepare:
19768 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
19769 diff -urNp linux-2.6.32.42/arch/x86/mm/extable.c linux-2.6.32.42/arch/x86/mm/extable.c
19770 --- linux-2.6.32.42/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
19771 +++ linux-2.6.32.42/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
19772 @@ -1,14 +1,71 @@
19773 #include <linux/module.h>
19774 #include <linux/spinlock.h>
19775 +#include <linux/sort.h>
19776 #include <asm/uaccess.h>
19777 +#include <asm/pgtable.h>
19778
19779 +/*
19780 + * The exception table needs to be sorted so that the binary
19781 + * search that we use to find entries in it works properly.
19782 + * This is used both for the kernel exception table and for
19783 + * the exception tables of modules that get loaded.
19784 + */
19785 +static int cmp_ex(const void *a, const void *b)
19786 +{
19787 + const struct exception_table_entry *x = a, *y = b;
19788 +
19789 + /* avoid overflow */
19790 + if (x->insn > y->insn)
19791 + return 1;
19792 + if (x->insn < y->insn)
19793 + return -1;
19794 + return 0;
19795 +}
19796 +
19797 +static void swap_ex(void *a, void *b, int size)
19798 +{
19799 + struct exception_table_entry t, *x = a, *y = b;
19800 +
19801 + t = *x;
19802 +
19803 + pax_open_kernel();
19804 + *x = *y;
19805 + *y = t;
19806 + pax_close_kernel();
19807 +}
19808 +
19809 +void sort_extable(struct exception_table_entry *start,
19810 + struct exception_table_entry *finish)
19811 +{
19812 + sort(start, finish - start, sizeof(struct exception_table_entry),
19813 + cmp_ex, swap_ex);
19814 +}
19815 +
19816 +#ifdef CONFIG_MODULES
19817 +/*
19818 + * If the exception table is sorted, any referring to the module init
19819 + * will be at the beginning or the end.
19820 + */
19821 +void trim_init_extable(struct module *m)
19822 +{
19823 + /*trim the beginning*/
19824 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
19825 + m->extable++;
19826 + m->num_exentries--;
19827 + }
19828 + /*trim the end*/
19829 + while (m->num_exentries &&
19830 + within_module_init(m->extable[m->num_exentries-1].insn, m))
19831 + m->num_exentries--;
19832 +}
19833 +#endif /* CONFIG_MODULES */
19834
19835 int fixup_exception(struct pt_regs *regs)
19836 {
19837 const struct exception_table_entry *fixup;
19838
19839 #ifdef CONFIG_PNPBIOS
19840 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19841 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19842 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19843 extern u32 pnp_bios_is_utter_crap;
19844 pnp_bios_is_utter_crap = 1;
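
Annotation: the sort_extable() replacement above exists because fixup_exception() locates entries by binary search, so the table must be ordered by faulting address; swap_ex() additionally brackets its writes with pax_open_kernel()/pax_close_kernel() since under KERNEXEC the table sits in read-only memory. A self-contained model of the lookup that this ordering enables, using plain arrays instead of the kernel's struct exception_table_entry:

	#include <stdio.h>
	#include <stdlib.h>

	/* Modeled on struct exception_table_entry: the address of the
	 * faulting instruction and the address of its fixup code. */
	struct ex_entry {
		unsigned long insn;
		unsigned long fixup;
	};

	/* Same shape as the cmp_ex() above: compare by ->insn without
	 * risking overflow from a plain subtraction. */
	static int cmp_ex(const void *a, const void *b)
	{
		const struct ex_entry *x = a, *y = b;

		if (x->insn > y->insn)
			return 1;
		if (x->insn < y->insn)
			return -1;
		return 0;
	}

	/* The binary search that the sort makes possible: returns the fixup
	 * address for a faulting instruction, or 0 if it is not covered. */
	static unsigned long search_ex(const struct ex_entry *tbl, size_t n,
				       unsigned long addr)
	{
		size_t lo = 0, hi = n;

		while (lo < hi) {
			size_t mid = lo + (hi - lo) / 2;

			if (tbl[mid].insn == addr)
				return tbl[mid].fixup;
			if (tbl[mid].insn < addr)
				lo = mid + 1;
			else
				hi = mid;
		}
		return 0;
	}

	int main(void)
	{
		struct ex_entry tbl[] = {
			{ 0x3000, 0x9300 }, { 0x1000, 0x9100 }, { 0x2000, 0x9200 },
		};
		size_t n = sizeof(tbl) / sizeof(tbl[0]);

		qsort(tbl, n, sizeof(tbl[0]), cmp_ex);
		printf("fixup for 0x2000: %#lx\n", search_ex(tbl, n, 0x2000));
		return 0;
	}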
19845 diff -urNp linux-2.6.32.42/arch/x86/mm/fault.c linux-2.6.32.42/arch/x86/mm/fault.c
19846 --- linux-2.6.32.42/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
19847 +++ linux-2.6.32.42/arch/x86/mm/fault.c 2011-06-06 17:35:16.000000000 -0400
19848 @@ -11,10 +11,19 @@
19849 #include <linux/kprobes.h> /* __kprobes, ... */
19850 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
19851 #include <linux/perf_event.h> /* perf_sw_event */
19852 +#include <linux/unistd.h>
19853 +#include <linux/compiler.h>
19854
19855 #include <asm/traps.h> /* dotraplinkage, ... */
19856 #include <asm/pgalloc.h> /* pgd_*(), ... */
19857 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
19858 +#include <asm/vsyscall.h>
19859 +#include <asm/tlbflush.h>
19860 +
19861 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19862 +#include <asm/stacktrace.h>
19863 +#include "../kernel/dumpstack.h"
19864 +#endif
19865
19866 /*
19867 * Page fault error code bits:
19868 @@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
19869 int ret = 0;
19870
19871 /* kprobe_running() needs smp_processor_id() */
19872 - if (kprobes_built_in() && !user_mode_vm(regs)) {
19873 + if (kprobes_built_in() && !user_mode(regs)) {
19874 preempt_disable();
19875 if (kprobe_running() && kprobe_fault_handler(regs, 14))
19876 ret = 1;
19877 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
19878 return !instr_lo || (instr_lo>>1) == 1;
19879 case 0x00:
19880 /* Prefetch instruction is 0x0F0D or 0x0F18 */
19881 - if (probe_kernel_address(instr, opcode))
19882 + if (user_mode(regs)) {
19883 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
19884 + return 0;
19885 + } else if (probe_kernel_address(instr, opcode))
19886 return 0;
19887
19888 *prefetch = (instr_lo == 0xF) &&
19889 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
19890 while (instr < max_instr) {
19891 unsigned char opcode;
19892
19893 - if (probe_kernel_address(instr, opcode))
19894 + if (user_mode(regs)) {
19895 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
19896 + break;
19897 + } else if (probe_kernel_address(instr, opcode))
19898 break;
19899
19900 instr++;
19901 @@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
19902 force_sig_info(si_signo, &info, tsk);
19903 }
19904
19905 +#ifdef CONFIG_PAX_EMUTRAMP
19906 +static int pax_handle_fetch_fault(struct pt_regs *regs);
19907 +#endif
19908 +
19909 +#ifdef CONFIG_PAX_PAGEEXEC
19910 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
19911 +{
19912 + pgd_t *pgd;
19913 + pud_t *pud;
19914 + pmd_t *pmd;
19915 +
19916 + pgd = pgd_offset(mm, address);
19917 + if (!pgd_present(*pgd))
19918 + return NULL;
19919 + pud = pud_offset(pgd, address);
19920 + if (!pud_present(*pud))
19921 + return NULL;
19922 + pmd = pmd_offset(pud, address);
19923 + if (!pmd_present(*pmd))
19924 + return NULL;
19925 + return pmd;
19926 +}
19927 +#endif
19928 +
19929 DEFINE_SPINLOCK(pgd_lock);
19930 LIST_HEAD(pgd_list);
19931
19932 @@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
19933 address += PMD_SIZE) {
19934
19935 unsigned long flags;
19936 +
19937 +#ifdef CONFIG_PAX_PER_CPU_PGD
19938 + unsigned long cpu;
19939 +#else
19940 struct page *page;
19941 +#endif
19942
19943 spin_lock_irqsave(&pgd_lock, flags);
19944 +
19945 +#ifdef CONFIG_PAX_PER_CPU_PGD
19946 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19947 + pgd_t *pgd = get_cpu_pgd(cpu);
19948 +#else
19949 list_for_each_entry(page, &pgd_list, lru) {
19950 - if (!vmalloc_sync_one(page_address(page), address))
19951 + pgd_t *pgd = page_address(page);
19952 +#endif
19953 +
19954 + if (!vmalloc_sync_one(pgd, address))
19955 break;
19956 }
19957 spin_unlock_irqrestore(&pgd_lock, flags);
19958 @@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
19959 * an interrupt in the middle of a task switch..
19960 */
19961 pgd_paddr = read_cr3();
19962 +
19963 +#ifdef CONFIG_PAX_PER_CPU_PGD
19964 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19965 +#endif
19966 +
19967 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19968 if (!pmd_k)
19969 return -1;
19970 @@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
19971
19972 const pgd_t *pgd_ref = pgd_offset_k(address);
19973 unsigned long flags;
19974 +
19975 +#ifdef CONFIG_PAX_PER_CPU_PGD
19976 + unsigned long cpu;
19977 +#else
19978 struct page *page;
19979 +#endif
19980
19981 if (pgd_none(*pgd_ref))
19982 continue;
19983
19984 spin_lock_irqsave(&pgd_lock, flags);
19985 +
19986 +#ifdef CONFIG_PAX_PER_CPU_PGD
19987 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19988 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
19989 +#else
19990 list_for_each_entry(page, &pgd_list, lru) {
19991 pgd_t *pgd;
19992 pgd = (pgd_t *)page_address(page) + pgd_index(address);
19993 +#endif
19994 +
19995 if (pgd_none(*pgd))
19996 set_pgd(pgd, *pgd_ref);
19997 else
19998 @@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
19999 * happen within a race in page table update. In the later
20000 * case just flush:
20001 */
20002 +
20003 +#ifdef CONFIG_PAX_PER_CPU_PGD
20004 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
20005 + pgd = pgd_offset_cpu(smp_processor_id(), address);
20006 +#else
20007 pgd = pgd_offset(current->active_mm, address);
20008 +#endif
20009 +
20010 pgd_ref = pgd_offset_k(address);
20011 if (pgd_none(*pgd_ref))
20012 return -1;
20013 @@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
20014 static int is_errata100(struct pt_regs *regs, unsigned long address)
20015 {
20016 #ifdef CONFIG_X86_64
20017 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
20018 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
20019 return 1;
20020 #endif
20021 return 0;
20022 @@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
20023 }
20024
20025 static const char nx_warning[] = KERN_CRIT
20026 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20027 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20028
20029 static void
20030 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20031 @@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
20032 if (!oops_may_print())
20033 return;
20034
20035 - if (error_code & PF_INSTR) {
20036 + if (nx_enabled && (error_code & PF_INSTR)) {
20037 unsigned int level;
20038
20039 pte_t *pte = lookup_address(address, &level);
20040
20041 if (pte && pte_present(*pte) && !pte_exec(*pte))
20042 - printk(nx_warning, current_uid());
20043 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20044 }
20045
20046 +#ifdef CONFIG_PAX_KERNEXEC
20047 + if (init_mm.start_code <= address && address < init_mm.end_code) {
20048 + if (current->signal->curr_ip)
20049 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20050 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20051 + else
20052 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20053 + current->comm, task_pid_nr(current), current_uid(), current_euid());
20054 + }
20055 +#endif
20056 +
20057 printk(KERN_ALERT "BUG: unable to handle kernel ");
20058 if (address < PAGE_SIZE)
20059 printk(KERN_CONT "NULL pointer dereference");
20060 @@ -704,6 +791,68 @@ __bad_area_nosemaphore(struct pt_regs *r
20061 unsigned long address, int si_code)
20062 {
20063 struct task_struct *tsk = current;
20064 + struct mm_struct *mm = tsk->mm;
20065 +
20066 +#ifdef CONFIG_X86_64
20067 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
20068 + if (regs->ip == (unsigned long)vgettimeofday) {
20069 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
20070 + return;
20071 + } else if (regs->ip == (unsigned long)vtime) {
20072 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
20073 + return;
20074 + } else if (regs->ip == (unsigned long)vgetcpu) {
20075 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
20076 + return;
20077 + }
20078 + }
20079 +#endif
20080 +
20081 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20082 + if (mm && (error_code & PF_USER)) {
20083 + unsigned long ip = regs->ip;
20084 +
20085 + if (v8086_mode(regs))
20086 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20087 +
20088 + /*
20089 + * It's possible to have interrupts off here:
20090 + */
20091 + local_irq_enable();
20092 +
20093 +#ifdef CONFIG_PAX_PAGEEXEC
20094 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
20095 + ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
20096 +
20097 +#ifdef CONFIG_PAX_EMUTRAMP
20098 + switch (pax_handle_fetch_fault(regs)) {
20099 + case 2:
20100 + return;
20101 + }
20102 +#endif
20103 +
20104 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20105 + do_group_exit(SIGKILL);
20106 + }
20107 +#endif
20108 +
20109 +#ifdef CONFIG_PAX_SEGMEXEC
20110 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
20111 +
20112 +#ifdef CONFIG_PAX_EMUTRAMP
20113 + switch (pax_handle_fetch_fault(regs)) {
20114 + case 2:
20115 + return;
20116 + }
20117 +#endif
20118 +
20119 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20120 + do_group_exit(SIGKILL);
20121 + }
20122 +#endif
20123 +
20124 + }
20125 +#endif
20126
20127 /* User mode accesses just cause a SIGSEGV */
20128 if (error_code & PF_USER) {
20129 @@ -857,6 +1006,99 @@ static int spurious_fault_check(unsigned
20130 return 1;
20131 }
20132
20133 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20134 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20135 +{
20136 + pte_t *pte;
20137 + pmd_t *pmd;
20138 + spinlock_t *ptl;
20139 + unsigned char pte_mask;
20140 +
20141 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20142 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
20143 + return 0;
20144 +
20145 + /* PaX: it's our fault, let's handle it if we can */
20146 +
20147 + /* PaX: take a look at read faults before acquiring any locks */
20148 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20149 + /* instruction fetch attempt from a protected page in user mode */
20150 + up_read(&mm->mmap_sem);
20151 +
20152 +#ifdef CONFIG_PAX_EMUTRAMP
20153 + switch (pax_handle_fetch_fault(regs)) {
20154 + case 2:
20155 + return 1;
20156 + }
20157 +#endif
20158 +
20159 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20160 + do_group_exit(SIGKILL);
20161 + }
20162 +
20163 + pmd = pax_get_pmd(mm, address);
20164 + if (unlikely(!pmd))
20165 + return 0;
20166 +
20167 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20168 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20169 + pte_unmap_unlock(pte, ptl);
20170 + return 0;
20171 + }
20172 +
20173 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20174 + /* write attempt to a protected page in user mode */
20175 + pte_unmap_unlock(pte, ptl);
20176 + return 0;
20177 + }
20178 +
20179 +#ifdef CONFIG_SMP
20180 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20181 +#else
20182 + if (likely(address > get_limit(regs->cs)))
20183 +#endif
20184 + {
20185 + set_pte(pte, pte_mkread(*pte));
20186 + __flush_tlb_one(address);
20187 + pte_unmap_unlock(pte, ptl);
20188 + up_read(&mm->mmap_sem);
20189 + return 1;
20190 + }
20191 +
20192 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20193 +
20194 + /*
20195 + * PaX: fill DTLB with user rights and retry
20196 + */
20197 + __asm__ __volatile__ (
20198 + "orb %2,(%1)\n"
20199 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20200 +/*
20201 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20202 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20203 + * page fault when examined during a TLB load attempt. this is true not only
20204 + * for PTEs holding a non-present entry but also present entries that will
20205 + * raise a page fault (such as those set up by PaX, or the copy-on-write
20206 + * mechanism). in effect it means that we do *not* need to flush the TLBs
20207 + * for our target pages since their PTEs are simply not in the TLBs at all.
20208 +
20209 + * the best thing in omitting it is that we gain around 15-20% speed in the
20210 + * fast path of the page fault handler and can get rid of tracing since we
20211 + * can no longer flush unintended entries.
20212 + */
20213 + "invlpg (%0)\n"
20214 +#endif
20215 + __copyuser_seg"testb $0,(%0)\n"
20216 + "xorb %3,(%1)\n"
20217 + :
20218 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
20219 + : "memory", "cc");
20220 + pte_unmap_unlock(pte, ptl);
20221 + up_read(&mm->mmap_sem);
20222 + return 1;
20223 +}
20224 +#endif
20225 +
20226 /*
20227 * Handle a spurious fault caused by a stale TLB entry.
20228 *
20229 @@ -923,6 +1165,9 @@ int show_unhandled_signals = 1;
20230 static inline int
20231 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
20232 {
20233 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
20234 + return 1;
20235 +
20236 if (write) {
20237 /* write, present and write, not present: */
20238 if (unlikely(!(vma->vm_flags & VM_WRITE)))
20239 @@ -956,17 +1201,31 @@ do_page_fault(struct pt_regs *regs, unsi
20240 {
20241 struct vm_area_struct *vma;
20242 struct task_struct *tsk;
20243 - unsigned long address;
20244 struct mm_struct *mm;
20245 int write;
20246 int fault;
20247
20248 + /* Get the faulting address: */
20249 + unsigned long address = read_cr2();
20250 +
20251 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20252 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
20253 + if (!search_exception_tables(regs->ip)) {
20254 + bad_area_nosemaphore(regs, error_code, address);
20255 + return;
20256 + }
20257 + if (address < PAX_USER_SHADOW_BASE) {
20258 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
20259 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
20260 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
20261 + } else
20262 + address -= PAX_USER_SHADOW_BASE;
20263 + }
20264 +#endif
20265 +
20266 tsk = current;
20267 mm = tsk->mm;
20268
20269 - /* Get the faulting address: */
20270 - address = read_cr2();
20271 -
20272 /*
20273 * Detect and handle instructions that would cause a page fault for
20274 * both a tracked kernel page and a userspace page.
20275 @@ -1026,7 +1285,7 @@ do_page_fault(struct pt_regs *regs, unsi
20276 * User-mode registers count as a user access even for any
20277 * potential system fault or CPU buglet:
20278 */
20279 - if (user_mode_vm(regs)) {
20280 + if (user_mode(regs)) {
20281 local_irq_enable();
20282 error_code |= PF_USER;
20283 } else {
20284 @@ -1080,6 +1339,11 @@ do_page_fault(struct pt_regs *regs, unsi
20285 might_sleep();
20286 }
20287
20288 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20289 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
20290 + return;
20291 +#endif
20292 +
20293 vma = find_vma(mm, address);
20294 if (unlikely(!vma)) {
20295 bad_area(regs, error_code, address);
20296 @@ -1091,18 +1355,24 @@ do_page_fault(struct pt_regs *regs, unsi
20297 bad_area(regs, error_code, address);
20298 return;
20299 }
20300 - if (error_code & PF_USER) {
20301 - /*
20302 - * Accessing the stack below %sp is always a bug.
20303 - * The large cushion allows instructions like enter
20304 - * and pusha to work. ("enter $65535, $31" pushes
20305 - * 32 pointers and then decrements %sp by 65535.)
20306 - */
20307 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20308 - bad_area(regs, error_code, address);
20309 - return;
20310 - }
20311 + /*
20312 + * Accessing the stack below %sp is always a bug.
20313 + * The large cushion allows instructions like enter
20314 + * and pusha to work. ("enter $65535, $31" pushes
20315 + * 32 pointers and then decrements %sp by 65535.)
20316 + */
20317 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20318 + bad_area(regs, error_code, address);
20319 + return;
20320 + }
20321 +
20322 +#ifdef CONFIG_PAX_SEGMEXEC
20323 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
20324 + bad_area(regs, error_code, address);
20325 + return;
20326 }
20327 +#endif
20328 +
20329 if (unlikely(expand_stack(vma, address))) {
20330 bad_area(regs, error_code, address);
20331 return;
20332 @@ -1146,3 +1416,199 @@ good_area:
20333
20334 up_read(&mm->mmap_sem);
20335 }
20336 +
20337 +#ifdef CONFIG_PAX_EMUTRAMP
20338 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
20339 +{
20340 + int err;
20341 +
20342 + do { /* PaX: gcc trampoline emulation #1 */
20343 + unsigned char mov1, mov2;
20344 + unsigned short jmp;
20345 + unsigned int addr1, addr2;
20346 +
20347 +#ifdef CONFIG_X86_64
20348 + if ((regs->ip + 11) >> 32)
20349 + break;
20350 +#endif
20351 +
20352 + err = get_user(mov1, (unsigned char __user *)regs->ip);
20353 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20354 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
20355 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20356 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
20357 +
20358 + if (err)
20359 + break;
20360 +
20361 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
20362 + regs->cx = addr1;
20363 + regs->ax = addr2;
20364 + regs->ip = addr2;
20365 + return 2;
20366 + }
20367 + } while (0);
20368 +
20369 + do { /* PaX: gcc trampoline emulation #2 */
20370 + unsigned char mov, jmp;
20371 + unsigned int addr1, addr2;
20372 +
20373 +#ifdef CONFIG_X86_64
20374 + if ((regs->ip + 9) >> 32)
20375 + break;
20376 +#endif
20377 +
20378 + err = get_user(mov, (unsigned char __user *)regs->ip);
20379 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20380 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
20381 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20382 +
20383 + if (err)
20384 + break;
20385 +
20386 + if (mov == 0xB9 && jmp == 0xE9) {
20387 + regs->cx = addr1;
20388 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
20389 + return 2;
20390 + }
20391 + } while (0);
20392 +
20393 + return 1; /* PaX in action */
20394 +}
20395 +
20396 +#ifdef CONFIG_X86_64
20397 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
20398 +{
20399 + int err;
20400 +
20401 + do { /* PaX: gcc trampoline emulation #1 */
20402 + unsigned short mov1, mov2, jmp1;
20403 + unsigned char jmp2;
20404 + unsigned int addr1;
20405 + unsigned long addr2;
20406 +
20407 + err = get_user(mov1, (unsigned short __user *)regs->ip);
20408 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
20409 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
20410 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
20411 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
20412 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
20413 +
20414 + if (err)
20415 + break;
20416 +
20417 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20418 + regs->r11 = addr1;
20419 + regs->r10 = addr2;
20420 + regs->ip = addr1;
20421 + return 2;
20422 + }
20423 + } while (0);
20424 +
20425 + do { /* PaX: gcc trampoline emulation #2 */
20426 + unsigned short mov1, mov2, jmp1;
20427 + unsigned char jmp2;
20428 + unsigned long addr1, addr2;
20429 +
20430 + err = get_user(mov1, (unsigned short __user *)regs->ip);
20431 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
20432 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
20433 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
20434 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
20435 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
20436 +
20437 + if (err)
20438 + break;
20439 +
20440 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20441 + regs->r11 = addr1;
20442 + regs->r10 = addr2;
20443 + regs->ip = addr1;
20444 + return 2;
20445 + }
20446 + } while (0);
20447 +
20448 + return 1; /* PaX in action */
20449 +}
20450 +#endif
20451 +
20452 +/*
20453 + * PaX: decide what to do with offenders (regs->ip = fault address)
20454 + *
20455 + * returns 1 when task should be killed
20456 + * 2 when gcc trampoline was detected
20457 + */
20458 +static int pax_handle_fetch_fault(struct pt_regs *regs)
20459 +{
20460 + if (v8086_mode(regs))
20461 + return 1;
20462 +
20463 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
20464 + return 1;
20465 +
20466 +#ifdef CONFIG_X86_32
20467 + return pax_handle_fetch_fault_32(regs);
20468 +#else
20469 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
20470 + return pax_handle_fetch_fault_32(regs);
20471 + else
20472 + return pax_handle_fetch_fault_64(regs);
20473 +#endif
20474 +}
20475 +#endif
20476 +
20477 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20478 +void pax_report_insns(void *pc, void *sp)
20479 +{
20480 + long i;
20481 +
20482 + printk(KERN_ERR "PAX: bytes at PC: ");
20483 + for (i = 0; i < 20; i++) {
20484 + unsigned char c;
20485 + if (get_user(c, (__force unsigned char __user *)pc+i))
20486 + printk(KERN_CONT "?? ");
20487 + else
20488 + printk(KERN_CONT "%02x ", c);
20489 + }
20490 + printk("\n");
20491 +
20492 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
20493 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
20494 + unsigned long c;
20495 + if (get_user(c, (__force unsigned long __user *)sp+i))
20496 +#ifdef CONFIG_X86_32
20497 + printk(KERN_CONT "???????? ");
20498 +#else
20499 + printk(KERN_CONT "???????????????? ");
20500 +#endif
20501 + else
20502 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
20503 + }
20504 + printk("\n");
20505 +}
20506 +#endif
20507 +
20508 +/**
20509 + * probe_kernel_write(): safely attempt to write to a location
20510 + * @dst: address to write to
20511 + * @src: pointer to the data that shall be written
20512 + * @size: size of the data chunk
20513 + *
20514 + * Safely write to address @dst from the buffer at @src. If a kernel fault
20515 + * happens, handle that and return -EFAULT.
20516 + */
20517 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
20518 +{
20519 + long ret;
20520 + mm_segment_t old_fs = get_fs();
20521 +
20522 + set_fs(KERNEL_DS);
20523 + pagefault_disable();
20524 + pax_open_kernel();
20525 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
20526 + pax_close_kernel();
20527 + pagefault_enable();
20528 + set_fs(old_fs);
20529 +
20530 + return ret ? -EFAULT : 0;
20531 +}
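
Annotation: among the fault.c additions above, the EMUTRAMP handlers are the most opcode-specific. Trampoline emulation #1 in pax_handle_fetch_fault_32(), for instance, only fires when the faulting code is exactly "movl $addr1,%ecx; movl $addr2,%eax; jmp *%eax" (the 12 bytes B9 imm32, B8 imm32, FF E0), in which case the handler loads cx/ax and redirects regs->ip instead of letting the non-executable-page fault kill the task. A small user-space recognizer for that same byte pattern, reading from a local buffer rather than via get_user():

	#include <stdio.h>
	#include <stdint.h>

	/* Endian-independent read of a little-endian 32-bit immediate. */
	static uint32_t le32(const unsigned char *p)
	{
		return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
		       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
	}

	/* Mirrors gcc trampoline emulation #1: movl $addr1,%ecx (B9 imm32),
	 * movl $addr2,%eax (B8 imm32), jmp *%eax (FF E0).  On a match the
	 * kernel handler sets regs->cx, regs->ax and regs->ip; here we just
	 * report the two immediates. */
	static int match_trampoline1(const unsigned char *p,
				     uint32_t *addr1, uint32_t *addr2)
	{
		if (p[0] != 0xB9 || p[5] != 0xB8 || p[10] != 0xFF || p[11] != 0xE0)
			return 0;
		*addr1 = le32(p + 1);
		*addr2 = le32(p + 6);
		return 1;
	}

	int main(void)
	{
		/* movl $0x11223344,%ecx; movl $0x55667788,%eax; jmp *%eax */
		const unsigned char code[12] = {
			0xB9, 0x44, 0x33, 0x22, 0x11,
			0xB8, 0x88, 0x77, 0x66, 0x55,
			0xFF, 0xE0,
		};
		uint32_t a1, a2;

		if (match_trampoline1(code, &a1, &a2))
			printf("trampoline: ecx=%#x, jump to %#x\n",
			       (unsigned)a1, (unsigned)a2);
		return 0;
	}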
20532 diff -urNp linux-2.6.32.42/arch/x86/mm/gup.c linux-2.6.32.42/arch/x86/mm/gup.c
20533 --- linux-2.6.32.42/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
20534 +++ linux-2.6.32.42/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
20535 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
20536 addr = start;
20537 len = (unsigned long) nr_pages << PAGE_SHIFT;
20538 end = start + len;
20539 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20540 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20541 (void __user *)start, len)))
20542 return 0;
20543
20544 diff -urNp linux-2.6.32.42/arch/x86/mm/highmem_32.c linux-2.6.32.42/arch/x86/mm/highmem_32.c
20545 --- linux-2.6.32.42/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
20546 +++ linux-2.6.32.42/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
20547 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
20548 idx = type + KM_TYPE_NR*smp_processor_id();
20549 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20550 BUG_ON(!pte_none(*(kmap_pte-idx)));
20551 +
20552 + pax_open_kernel();
20553 set_pte(kmap_pte-idx, mk_pte(page, prot));
20554 + pax_close_kernel();
20555
20556 return (void *)vaddr;
20557 }
20558 diff -urNp linux-2.6.32.42/arch/x86/mm/hugetlbpage.c linux-2.6.32.42/arch/x86/mm/hugetlbpage.c
20559 --- linux-2.6.32.42/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
20560 +++ linux-2.6.32.42/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
20561 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
20562 struct hstate *h = hstate_file(file);
20563 struct mm_struct *mm = current->mm;
20564 struct vm_area_struct *vma;
20565 - unsigned long start_addr;
20566 + unsigned long start_addr, pax_task_size = TASK_SIZE;
20567 +
20568 +#ifdef CONFIG_PAX_SEGMEXEC
20569 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20570 + pax_task_size = SEGMEXEC_TASK_SIZE;
20571 +#endif
20572 +
20573 + pax_task_size -= PAGE_SIZE;
20574
20575 if (len > mm->cached_hole_size) {
20576 - start_addr = mm->free_area_cache;
20577 + start_addr = mm->free_area_cache;
20578 } else {
20579 - start_addr = TASK_UNMAPPED_BASE;
20580 - mm->cached_hole_size = 0;
20581 + start_addr = mm->mmap_base;
20582 + mm->cached_hole_size = 0;
20583 }
20584
20585 full_search:
20586 @@ -281,26 +288,27 @@ full_search:
20587
20588 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20589 /* At this point: (!vma || addr < vma->vm_end). */
20590 - if (TASK_SIZE - len < addr) {
20591 + if (pax_task_size - len < addr) {
20592 /*
20593 * Start a new search - just in case we missed
20594 * some holes.
20595 */
20596 - if (start_addr != TASK_UNMAPPED_BASE) {
20597 - start_addr = TASK_UNMAPPED_BASE;
20598 + if (start_addr != mm->mmap_base) {
20599 + start_addr = mm->mmap_base;
20600 mm->cached_hole_size = 0;
20601 goto full_search;
20602 }
20603 return -ENOMEM;
20604 }
20605 - if (!vma || addr + len <= vma->vm_start) {
20606 - mm->free_area_cache = addr + len;
20607 - return addr;
20608 - }
20609 + if (check_heap_stack_gap(vma, addr, len))
20610 + break;
20611 if (addr + mm->cached_hole_size < vma->vm_start)
20612 mm->cached_hole_size = vma->vm_start - addr;
20613 addr = ALIGN(vma->vm_end, huge_page_size(h));
20614 }
20615 +
20616 + mm->free_area_cache = addr + len;
20617 + return addr;
20618 }
20619
20620 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
20621 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
20622 {
20623 struct hstate *h = hstate_file(file);
20624 struct mm_struct *mm = current->mm;
20625 - struct vm_area_struct *vma, *prev_vma;
20626 - unsigned long base = mm->mmap_base, addr = addr0;
20627 + struct vm_area_struct *vma;
20628 + unsigned long base = mm->mmap_base, addr;
20629 unsigned long largest_hole = mm->cached_hole_size;
20630 - int first_time = 1;
20631
20632 /* don't allow allocations above current base */
20633 if (mm->free_area_cache > base)
20634 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
20635 largest_hole = 0;
20636 mm->free_area_cache = base;
20637 }
20638 -try_again:
20639 +
20640 /* make sure it can fit in the remaining address space */
20641 if (mm->free_area_cache < len)
20642 goto fail;
20643
20644 /* either no address requested or cant fit in requested address hole */
20645 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
20646 + addr = (mm->free_area_cache - len);
20647 do {
20648 + addr &= huge_page_mask(h);
20649 + vma = find_vma(mm, addr);
20650 /*
20651 * Lookup failure means no vma is above this address,
20652 * i.e. return with success:
20653 - */
20654 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
20655 - return addr;
20656 -
20657 - /*
20658 * new region fits between prev_vma->vm_end and
20659 * vma->vm_start, use it:
20660 */
20661 - if (addr + len <= vma->vm_start &&
20662 - (!prev_vma || (addr >= prev_vma->vm_end))) {
20663 + if (check_heap_stack_gap(vma, addr, len)) {
20664 /* remember the address as a hint for next time */
20665 - mm->cached_hole_size = largest_hole;
20666 - return (mm->free_area_cache = addr);
20667 - } else {
20668 - /* pull free_area_cache down to the first hole */
20669 - if (mm->free_area_cache == vma->vm_end) {
20670 - mm->free_area_cache = vma->vm_start;
20671 - mm->cached_hole_size = largest_hole;
20672 - }
20673 + mm->cached_hole_size = largest_hole;
20674 + return (mm->free_area_cache = addr);
20675 + }
20676 + /* pull free_area_cache down to the first hole */
20677 + if (mm->free_area_cache == vma->vm_end) {
20678 + mm->free_area_cache = vma->vm_start;
20679 + mm->cached_hole_size = largest_hole;
20680 }
20681
20682 /* remember the largest hole we saw so far */
20683 if (addr + largest_hole < vma->vm_start)
20684 - largest_hole = vma->vm_start - addr;
20685 + largest_hole = vma->vm_start - addr;
20686
20687 /* try just below the current vma->vm_start */
20688 - addr = (vma->vm_start - len) & huge_page_mask(h);
20689 - } while (len <= vma->vm_start);
20690 + addr = skip_heap_stack_gap(vma, len);
20691 + } while (!IS_ERR_VALUE(addr));
20692
20693 fail:
20694 /*
20695 - * if hint left us with no space for the requested
20696 - * mapping then try again:
20697 - */
20698 - if (first_time) {
20699 - mm->free_area_cache = base;
20700 - largest_hole = 0;
20701 - first_time = 0;
20702 - goto try_again;
20703 - }
20704 - /*
20705 * A failed mmap() very likely causes application failure,
20706 * so fall back to the bottom-up function here. This scenario
20707 * can happen with large stack limits and large mmap()
20708 * allocations.
20709 */
20710 - mm->free_area_cache = TASK_UNMAPPED_BASE;
20711 +
20712 +#ifdef CONFIG_PAX_SEGMEXEC
20713 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20714 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20715 + else
20716 +#endif
20717 +
20718 + mm->mmap_base = TASK_UNMAPPED_BASE;
20719 +
20720 +#ifdef CONFIG_PAX_RANDMMAP
20721 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20722 + mm->mmap_base += mm->delta_mmap;
20723 +#endif
20724 +
20725 + mm->free_area_cache = mm->mmap_base;
20726 mm->cached_hole_size = ~0UL;
20727 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
20728 len, pgoff, flags);
20729 @@ -387,6 +393,7 @@ fail:
20730 /*
20731 * Restore the topdown base:
20732 */
20733 + mm->mmap_base = base;
20734 mm->free_area_cache = base;
20735 mm->cached_hole_size = ~0UL;
20736
20737 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
20738 struct hstate *h = hstate_file(file);
20739 struct mm_struct *mm = current->mm;
20740 struct vm_area_struct *vma;
20741 + unsigned long pax_task_size = TASK_SIZE;
20742
20743 if (len & ~huge_page_mask(h))
20744 return -EINVAL;
20745 - if (len > TASK_SIZE)
20746 +
20747 +#ifdef CONFIG_PAX_SEGMEXEC
20748 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20749 + pax_task_size = SEGMEXEC_TASK_SIZE;
20750 +#endif
20751 +
20752 + pax_task_size -= PAGE_SIZE;
20753 +
20754 + if (len > pax_task_size)
20755 return -ENOMEM;
20756
20757 if (flags & MAP_FIXED) {
20758 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
20759 if (addr) {
20760 addr = ALIGN(addr, huge_page_size(h));
20761 vma = find_vma(mm, addr);
20762 - if (TASK_SIZE - len >= addr &&
20763 - (!vma || addr + len <= vma->vm_start))
20764 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
20765 return addr;
20766 }
20767 if (mm->get_unmapped_area == arch_get_unmapped_area)
20768 diff -urNp linux-2.6.32.42/arch/x86/mm/init_32.c linux-2.6.32.42/arch/x86/mm/init_32.c
20769 --- linux-2.6.32.42/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
20770 +++ linux-2.6.32.42/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
20771 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
20772 }
20773
20774 /*
20775 - * Creates a middle page table and puts a pointer to it in the
20776 - * given global directory entry. This only returns the gd entry
20777 - * in non-PAE compilation mode, since the middle layer is folded.
20778 - */
20779 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
20780 -{
20781 - pud_t *pud;
20782 - pmd_t *pmd_table;
20783 -
20784 -#ifdef CONFIG_X86_PAE
20785 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
20786 - if (after_bootmem)
20787 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
20788 - else
20789 - pmd_table = (pmd_t *)alloc_low_page();
20790 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
20791 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
20792 - pud = pud_offset(pgd, 0);
20793 - BUG_ON(pmd_table != pmd_offset(pud, 0));
20794 -
20795 - return pmd_table;
20796 - }
20797 -#endif
20798 - pud = pud_offset(pgd, 0);
20799 - pmd_table = pmd_offset(pud, 0);
20800 -
20801 - return pmd_table;
20802 -}
20803 -
20804 -/*
20805 * Create a page table and place a pointer to it in a middle page
20806 * directory entry:
20807 */
20808 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
20809 page_table = (pte_t *)alloc_low_page();
20810
20811 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20812 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20813 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20814 +#else
20815 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20816 +#endif
20817 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20818 }
20819
20820 return pte_offset_kernel(pmd, 0);
20821 }
20822
20823 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
20824 +{
20825 + pud_t *pud;
20826 + pmd_t *pmd_table;
20827 +
20828 + pud = pud_offset(pgd, 0);
20829 + pmd_table = pmd_offset(pud, 0);
20830 +
20831 + return pmd_table;
20832 +}
20833 +
20834 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20835 {
20836 int pgd_idx = pgd_index(vaddr);
20837 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
20838 int pgd_idx, pmd_idx;
20839 unsigned long vaddr;
20840 pgd_t *pgd;
20841 + pud_t *pud;
20842 pmd_t *pmd;
20843 pte_t *pte = NULL;
20844
20845 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
20846 pgd = pgd_base + pgd_idx;
20847
20848 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
20849 - pmd = one_md_table_init(pgd);
20850 - pmd = pmd + pmd_index(vaddr);
20851 + pud = pud_offset(pgd, vaddr);
20852 + pmd = pmd_offset(pud, vaddr);
20853 +
20854 +#ifdef CONFIG_X86_PAE
20855 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20856 +#endif
20857 +
20858 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
20859 pmd++, pmd_idx++) {
20860 pte = page_table_kmap_check(one_page_table_init(pmd),
20861 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
20862 }
20863 }
20864
20865 -static inline int is_kernel_text(unsigned long addr)
20866 +static inline int is_kernel_text(unsigned long start, unsigned long end)
20867 {
20868 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
20869 - return 1;
20870 - return 0;
20871 + if ((start > ktla_ktva((unsigned long)_etext) ||
20872 + end <= ktla_ktva((unsigned long)_stext)) &&
20873 + (start > ktla_ktva((unsigned long)_einittext) ||
20874 + end <= ktla_ktva((unsigned long)_sinittext)) &&
20875 +
20876 +#ifdef CONFIG_ACPI_SLEEP
20877 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
20878 +#endif
20879 +
20880 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
20881 + return 0;
20882 + return 1;
20883 }
20884
20885 /*
20886 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
20887 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
20888 unsigned long start_pfn, end_pfn;
20889 pgd_t *pgd_base = swapper_pg_dir;
20890 - int pgd_idx, pmd_idx, pte_ofs;
20891 + unsigned int pgd_idx, pmd_idx, pte_ofs;
20892 unsigned long pfn;
20893 pgd_t *pgd;
20894 + pud_t *pud;
20895 pmd_t *pmd;
20896 pte_t *pte;
20897 unsigned pages_2m, pages_4k;
20898 @@ -278,8 +279,13 @@ repeat:
20899 pfn = start_pfn;
20900 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20901 pgd = pgd_base + pgd_idx;
20902 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
20903 - pmd = one_md_table_init(pgd);
20904 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
20905 + pud = pud_offset(pgd, 0);
20906 + pmd = pmd_offset(pud, 0);
20907 +
20908 +#ifdef CONFIG_X86_PAE
20909 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20910 +#endif
20911
20912 if (pfn >= end_pfn)
20913 continue;
20914 @@ -291,14 +297,13 @@ repeat:
20915 #endif
20916 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
20917 pmd++, pmd_idx++) {
20918 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
20919 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
20920
20921 /*
20922 * Map with big pages if possible, otherwise
20923 * create normal page tables:
20924 */
20925 if (use_pse) {
20926 - unsigned int addr2;
20927 pgprot_t prot = PAGE_KERNEL_LARGE;
20928 /*
20929 * first pass will use the same initial
20930 @@ -308,11 +313,7 @@ repeat:
20931 __pgprot(PTE_IDENT_ATTR |
20932 _PAGE_PSE);
20933
20934 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
20935 - PAGE_OFFSET + PAGE_SIZE-1;
20936 -
20937 - if (is_kernel_text(addr) ||
20938 - is_kernel_text(addr2))
20939 + if (is_kernel_text(address, address + PMD_SIZE))
20940 prot = PAGE_KERNEL_LARGE_EXEC;
20941
20942 pages_2m++;
20943 @@ -329,7 +330,7 @@ repeat:
20944 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20945 pte += pte_ofs;
20946 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
20947 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
20948 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
20949 pgprot_t prot = PAGE_KERNEL;
20950 /*
20951 * first pass will use the same initial
20952 @@ -337,7 +338,7 @@ repeat:
20953 */
20954 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
20955
20956 - if (is_kernel_text(addr))
20957 + if (is_kernel_text(address, address + PAGE_SIZE))
20958 prot = PAGE_KERNEL_EXEC;
20959
20960 pages_4k++;
20961 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
20962
20963 pud = pud_offset(pgd, va);
20964 pmd = pmd_offset(pud, va);
20965 - if (!pmd_present(*pmd))
20966 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
20967 break;
20968
20969 pte = pte_offset_kernel(pmd, va);
20970 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
20971
20972 static void __init pagetable_init(void)
20973 {
20974 - pgd_t *pgd_base = swapper_pg_dir;
20975 -
20976 - permanent_kmaps_init(pgd_base);
20977 + permanent_kmaps_init(swapper_pg_dir);
20978 }
20979
20980 #ifdef CONFIG_ACPI_SLEEP
20981 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
20982 * ACPI suspend needs this for resume, because things like the intel-agp
20983 * driver might have split up a kernel 4MB mapping.
20984 */
20985 -char swsusp_pg_dir[PAGE_SIZE]
20986 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
20987 __attribute__ ((aligned(PAGE_SIZE)));
20988
20989 static inline void save_pg_dir(void)
20990 {
20991 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
20992 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
20993 }
20994 #else /* !CONFIG_ACPI_SLEEP */
20995 static inline void save_pg_dir(void)
20996 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
20997 flush_tlb_all();
20998 }
20999
21000 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21001 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21002 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21003
21004 /* user-defined highmem size */
21005 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
21006 * Initialize the boot-time allocator (with low memory only):
21007 */
21008 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
21009 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21010 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21011 PAGE_SIZE);
21012 if (bootmap == -1L)
21013 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
21014 @@ -864,6 +863,12 @@ void __init mem_init(void)
21015
21016 pci_iommu_alloc();
21017
21018 +#ifdef CONFIG_PAX_PER_CPU_PGD
21019 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21020 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21021 + KERNEL_PGD_PTRS);
21022 +#endif
21023 +
21024 #ifdef CONFIG_FLATMEM
21025 BUG_ON(!mem_map);
21026 #endif
21027 @@ -881,7 +886,7 @@ void __init mem_init(void)
21028 set_highmem_pages_init();
21029
21030 codesize = (unsigned long) &_etext - (unsigned long) &_text;
21031 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
21032 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
21033 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
21034
21035 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
21036 @@ -923,10 +928,10 @@ void __init mem_init(void)
21037 ((unsigned long)&__init_end -
21038 (unsigned long)&__init_begin) >> 10,
21039
21040 - (unsigned long)&_etext, (unsigned long)&_edata,
21041 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
21042 + (unsigned long)&_sdata, (unsigned long)&_edata,
21043 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
21044
21045 - (unsigned long)&_text, (unsigned long)&_etext,
21046 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21047 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21048
21049 /*
21050 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
21051 if (!kernel_set_to_readonly)
21052 return;
21053
21054 + start = ktla_ktva(start);
21055 pr_debug("Set kernel text: %lx - %lx for read write\n",
21056 start, start+size);
21057
21058 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
21059 if (!kernel_set_to_readonly)
21060 return;
21061
21062 + start = ktla_ktva(start);
21063 pr_debug("Set kernel text: %lx - %lx for read only\n",
21064 start, start+size);
21065
21066 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
21067 unsigned long start = PFN_ALIGN(_text);
21068 unsigned long size = PFN_ALIGN(_etext) - start;
21069
21070 + start = ktla_ktva(start);
21071 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21072 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21073 size >> 10);
21074 diff -urNp linux-2.6.32.42/arch/x86/mm/init_64.c linux-2.6.32.42/arch/x86/mm/init_64.c
21075 --- linux-2.6.32.42/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
21076 +++ linux-2.6.32.42/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
21077 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21078 pmd = fill_pmd(pud, vaddr);
21079 pte = fill_pte(pmd, vaddr);
21080
21081 + pax_open_kernel();
21082 set_pte(pte, new_pte);
21083 + pax_close_kernel();
21084
21085 /*
21086 * It's enough to flush this one mapping.
21087 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
21088 pgd = pgd_offset_k((unsigned long)__va(phys));
21089 if (pgd_none(*pgd)) {
21090 pud = (pud_t *) spp_getpage();
21091 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21092 - _PAGE_USER));
21093 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21094 }
21095 pud = pud_offset(pgd, (unsigned long)__va(phys));
21096 if (pud_none(*pud)) {
21097 pmd = (pmd_t *) spp_getpage();
21098 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21099 - _PAGE_USER));
21100 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21101 }
21102 pmd = pmd_offset(pud, phys);
21103 BUG_ON(!pmd_none(*pmd));
21104 @@ -675,6 +675,12 @@ void __init mem_init(void)
21105
21106 pci_iommu_alloc();
21107
21108 +#ifdef CONFIG_PAX_PER_CPU_PGD
21109 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21110 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21111 + KERNEL_PGD_PTRS);
21112 +#endif
21113 +
21114 /* clear_bss() already clear the empty_zero_page */
21115
21116 reservedpages = 0;
21117 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
21118 static struct vm_area_struct gate_vma = {
21119 .vm_start = VSYSCALL_START,
21120 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21121 - .vm_page_prot = PAGE_READONLY_EXEC,
21122 - .vm_flags = VM_READ | VM_EXEC
21123 + .vm_page_prot = PAGE_READONLY,
21124 + .vm_flags = VM_READ
21125 };
21126
21127 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
21128 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
21129
21130 const char *arch_vma_name(struct vm_area_struct *vma)
21131 {
21132 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21133 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21134 return "[vdso]";
21135 if (vma == &gate_vma)
21136 return "[vsyscall]";
21137 diff -urNp linux-2.6.32.42/arch/x86/mm/init.c linux-2.6.32.42/arch/x86/mm/init.c
21138 --- linux-2.6.32.42/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
21139 +++ linux-2.6.32.42/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
21140 @@ -69,11 +69,7 @@ static void __init find_early_table_spac
21141 * cause a hotspot and fill up ZONE_DMA. The page tables
21142 * need roughly 0.5KB per GB.
21143 */
21144 -#ifdef CONFIG_X86_32
21145 - start = 0x7000;
21146 -#else
21147 - start = 0x8000;
21148 -#endif
21149 + start = 0x100000;
21150 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
21151 tables, PAGE_SIZE);
21152 if (e820_table_start == -1UL)
21153 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
21154 #endif
21155
21156 set_nx();
21157 - if (nx_enabled)
21158 + if (nx_enabled && cpu_has_nx)
21159 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
21160
21161 /* Enable PSE if available */
21162 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
21163 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
21164 * mmio resources as well as potential bios/acpi data regions.
21165 */
21166 +
21167 int devmem_is_allowed(unsigned long pagenr)
21168 {
21169 +#ifdef CONFIG_GRKERNSEC_KMEM
21170 + /* allow BDA */
21171 + if (!pagenr)
21172 + return 1;
21173 + /* allow EBDA */
21174 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
21175 + return 1;
21176 + /* allow ISA/video mem */
21177 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21178 + return 1;
21179 + /* throw out everything else below 1MB */
21180 + if (pagenr <= 256)
21181 + return 0;
21182 +#else
21183 if (pagenr <= 256)
21184 return 1;
21185 +#endif
21186 +
21187 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21188 return 0;
21189 if (!page_is_ram(pagenr))
21190 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
21191
21192 void free_initmem(void)
21193 {
21194 +
21195 +#ifdef CONFIG_PAX_KERNEXEC
21196 +#ifdef CONFIG_X86_32
21197 + /* PaX: limit KERNEL_CS to actual size */
21198 + unsigned long addr, limit;
21199 + struct desc_struct d;
21200 + int cpu;
21201 +
21202 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21203 + limit = (limit - 1UL) >> PAGE_SHIFT;
21204 +
21205 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21206 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
21207 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21208 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
21209 + }
21210 +
21211 + /* PaX: make KERNEL_CS read-only */
21212 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
21213 + if (!paravirt_enabled())
21214 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
21215 +/*
21216 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
21217 + pgd = pgd_offset_k(addr);
21218 + pud = pud_offset(pgd, addr);
21219 + pmd = pmd_offset(pud, addr);
21220 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21221 + }
21222 +*/
21223 +#ifdef CONFIG_X86_PAE
21224 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
21225 +/*
21226 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
21227 + pgd = pgd_offset_k(addr);
21228 + pud = pud_offset(pgd, addr);
21229 + pmd = pmd_offset(pud, addr);
21230 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21231 + }
21232 +*/
21233 +#endif
21234 +
21235 +#ifdef CONFIG_MODULES
21236 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
21237 +#endif
21238 +
21239 +#else
21240 + pgd_t *pgd;
21241 + pud_t *pud;
21242 + pmd_t *pmd;
21243 + unsigned long addr, end;
21244 +
21245 + /* PaX: make kernel code/rodata read-only, rest non-executable */
21246 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
21247 + pgd = pgd_offset_k(addr);
21248 + pud = pud_offset(pgd, addr);
21249 + pmd = pmd_offset(pud, addr);
21250 + if (!pmd_present(*pmd))
21251 + continue;
21252 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
21253 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21254 + else
21255 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21256 + }
21257 +
21258 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
21259 + end = addr + KERNEL_IMAGE_SIZE;
21260 + for (; addr < end; addr += PMD_SIZE) {
21261 + pgd = pgd_offset_k(addr);
21262 + pud = pud_offset(pgd, addr);
21263 + pmd = pmd_offset(pud, addr);
21264 + if (!pmd_present(*pmd))
21265 + continue;
21266 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
21267 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21268 + }
21269 +#endif
21270 +
21271 + flush_tlb_all();
21272 +#endif
21273 +
21274 free_init_pages("unused kernel memory",
21275 (unsigned long)(&__init_begin),
21276 (unsigned long)(&__init_end));
21277 diff -urNp linux-2.6.32.42/arch/x86/mm/iomap_32.c linux-2.6.32.42/arch/x86/mm/iomap_32.c
21278 --- linux-2.6.32.42/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
21279 +++ linux-2.6.32.42/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
21280 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
21281 debug_kmap_atomic(type);
21282 idx = type + KM_TYPE_NR * smp_processor_id();
21283 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21284 +
21285 + pax_open_kernel();
21286 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
21287 + pax_close_kernel();
21288 +
21289 arch_flush_lazy_mmu_mode();
21290
21291 return (void *)vaddr;
21292 diff -urNp linux-2.6.32.42/arch/x86/mm/ioremap.c linux-2.6.32.42/arch/x86/mm/ioremap.c
21293 --- linux-2.6.32.42/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
21294 +++ linux-2.6.32.42/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
21295 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
21296 * Second special case: Some BIOSen report the PC BIOS
21297 * area (640->1Mb) as ram even though it is not.
21298 */
21299 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
21300 - pagenr < (BIOS_END >> PAGE_SHIFT))
21301 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
21302 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21303 return 0;
21304
21305 for (i = 0; i < e820.nr_map; i++) {
21306 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
21307 /*
21308 * Don't allow anybody to remap normal RAM that we're using..
21309 */
21310 - for (pfn = phys_addr >> PAGE_SHIFT;
21311 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
21312 - pfn++) {
21313 -
21314 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
21315 int is_ram = page_is_ram(pfn);
21316
21317 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
21318 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
21319 return NULL;
21320 WARN_ON_ONCE(is_ram);
21321 }
21322 @@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
21323 early_param("early_ioremap_debug", early_ioremap_debug_setup);
21324
21325 static __initdata int after_paging_init;
21326 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
21327 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
21328
21329 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
21330 {
21331 @@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
21332 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
21333
21334 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
21335 - memset(bm_pte, 0, sizeof(bm_pte));
21336 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
21337 + pmd_populate_user(&init_mm, pmd, bm_pte);
21338
21339 /*
21340 * The boot-ioremap range spans multiple pmds, for which
21341 diff -urNp linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c
21342 --- linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
21343 +++ linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
21344 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
21345 * memory (e.g. tracked pages)? For now, we need this to avoid
21346 * invoking kmemcheck for PnP BIOS calls.
21347 */
21348 - if (regs->flags & X86_VM_MASK)
21349 + if (v8086_mode(regs))
21350 return false;
21351 - if (regs->cs != __KERNEL_CS)
21352 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
21353 return false;
21354
21355 pte = kmemcheck_pte_lookup(address);
21356 diff -urNp linux-2.6.32.42/arch/x86/mm/mmap.c linux-2.6.32.42/arch/x86/mm/mmap.c
21357 --- linux-2.6.32.42/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
21358 +++ linux-2.6.32.42/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
21359 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
21360 * Leave an at least ~128 MB hole with possible stack randomization.
21361 */
21362 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
21363 -#define MAX_GAP (TASK_SIZE/6*5)
21364 +#define MAX_GAP (pax_task_size/6*5)
21365
21366 /*
21367 * True on X86_32 or when emulating IA32 on X86_64
21368 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
21369 return rnd << PAGE_SHIFT;
21370 }
21371
21372 -static unsigned long mmap_base(void)
21373 +static unsigned long mmap_base(struct mm_struct *mm)
21374 {
21375 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
21376 + unsigned long pax_task_size = TASK_SIZE;
21377 +
21378 +#ifdef CONFIG_PAX_SEGMEXEC
21379 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21380 + pax_task_size = SEGMEXEC_TASK_SIZE;
21381 +#endif
21382
21383 if (gap < MIN_GAP)
21384 gap = MIN_GAP;
21385 else if (gap > MAX_GAP)
21386 gap = MAX_GAP;
21387
21388 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
21389 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
21390 }
21391
21392 /*
21393 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
21394 * does, but not when emulating X86_32
21395 */
21396 -static unsigned long mmap_legacy_base(void)
21397 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
21398 {
21399 - if (mmap_is_ia32())
21400 + if (mmap_is_ia32()) {
21401 +
21402 +#ifdef CONFIG_PAX_SEGMEXEC
21403 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21404 + return SEGMEXEC_TASK_UNMAPPED_BASE;
21405 + else
21406 +#endif
21407 +
21408 return TASK_UNMAPPED_BASE;
21409 - else
21410 + } else
21411 return TASK_UNMAPPED_BASE + mmap_rnd();
21412 }
21413
21414 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
21415 void arch_pick_mmap_layout(struct mm_struct *mm)
21416 {
21417 if (mmap_is_legacy()) {
21418 - mm->mmap_base = mmap_legacy_base();
21419 + mm->mmap_base = mmap_legacy_base(mm);
21420 +
21421 +#ifdef CONFIG_PAX_RANDMMAP
21422 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21423 + mm->mmap_base += mm->delta_mmap;
21424 +#endif
21425 +
21426 mm->get_unmapped_area = arch_get_unmapped_area;
21427 mm->unmap_area = arch_unmap_area;
21428 } else {
21429 - mm->mmap_base = mmap_base();
21430 + mm->mmap_base = mmap_base(mm);
21431 +
21432 +#ifdef CONFIG_PAX_RANDMMAP
21433 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21434 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
21435 +#endif
21436 +
21437 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
21438 mm->unmap_area = arch_unmap_area_topdown;
21439 }
21440 diff -urNp linux-2.6.32.42/arch/x86/mm/mmio-mod.c linux-2.6.32.42/arch/x86/mm/mmio-mod.c
21441 --- linux-2.6.32.42/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
21442 +++ linux-2.6.32.42/arch/x86/mm/mmio-mod.c 2011-05-04 17:56:28.000000000 -0400
21443 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
21444 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
21445 void __iomem *addr)
21446 {
21447 - static atomic_t next_id;
21448 + static atomic_unchecked_t next_id;
21449 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
21450 /* These are page-unaligned. */
21451 struct mmiotrace_map map = {
21452 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
21453 .private = trace
21454 },
21455 .phys = offset,
21456 - .id = atomic_inc_return(&next_id)
21457 + .id = atomic_inc_return_unchecked(&next_id)
21458 };
21459 map.map_id = trace->id;
21460
21461 diff -urNp linux-2.6.32.42/arch/x86/mm/numa_32.c linux-2.6.32.42/arch/x86/mm/numa_32.c
21462 --- linux-2.6.32.42/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
21463 +++ linux-2.6.32.42/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
21464 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
21465 }
21466 #endif
21467
21468 -extern unsigned long find_max_low_pfn(void);
21469 extern unsigned long highend_pfn, highstart_pfn;
21470
21471 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
21472 diff -urNp linux-2.6.32.42/arch/x86/mm/pageattr.c linux-2.6.32.42/arch/x86/mm/pageattr.c
21473 --- linux-2.6.32.42/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
21474 +++ linux-2.6.32.42/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
21475 @@ -261,16 +261,17 @@ static inline pgprot_t static_protection
21476 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
21477 */
21478 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
21479 - pgprot_val(forbidden) |= _PAGE_NX;
21480 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21481
21482 /*
21483 * The kernel text needs to be executable for obvious reasons
21484 * Does not cover __inittext since that is gone later on. On
21485 * 64bit we do not enforce !NX on the low mapping
21486 */
21487 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
21488 - pgprot_val(forbidden) |= _PAGE_NX;
21489 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
21490 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21491
21492 +#ifdef CONFIG_DEBUG_RODATA
21493 /*
21494 * The .rodata section needs to be read-only. Using the pfn
21495 * catches all aliases.
21496 @@ -278,6 +279,14 @@ static inline pgprot_t static_protection
21497 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
21498 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
21499 pgprot_val(forbidden) |= _PAGE_RW;
21500 +#endif
21501 +
21502 +#ifdef CONFIG_PAX_KERNEXEC
21503 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
21504 + pgprot_val(forbidden) |= _PAGE_RW;
21505 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21506 + }
21507 +#endif
21508
21509 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
21510
21511 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
21512 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
21513 {
21514 /* change init_mm */
21515 + pax_open_kernel();
21516 set_pte_atomic(kpte, pte);
21517 +
21518 #ifdef CONFIG_X86_32
21519 if (!SHARED_KERNEL_PMD) {
21520 +
21521 +#ifdef CONFIG_PAX_PER_CPU_PGD
21522 + unsigned long cpu;
21523 +#else
21524 struct page *page;
21525 +#endif
21526
21527 +#ifdef CONFIG_PAX_PER_CPU_PGD
21528 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21529 + pgd_t *pgd = get_cpu_pgd(cpu);
21530 +#else
21531 list_for_each_entry(page, &pgd_list, lru) {
21532 - pgd_t *pgd;
21533 + pgd_t *pgd = (pgd_t *)page_address(page);
21534 +#endif
21535 +
21536 pud_t *pud;
21537 pmd_t *pmd;
21538
21539 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
21540 + pgd += pgd_index(address);
21541 pud = pud_offset(pgd, address);
21542 pmd = pmd_offset(pud, address);
21543 set_pte_atomic((pte_t *)pmd, pte);
21544 }
21545 }
21546 #endif
21547 + pax_close_kernel();
21548 }
21549
21550 static int
21551 diff -urNp linux-2.6.32.42/arch/x86/mm/pageattr-test.c linux-2.6.32.42/arch/x86/mm/pageattr-test.c
21552 --- linux-2.6.32.42/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
21553 +++ linux-2.6.32.42/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
21554 @@ -36,7 +36,7 @@ enum {
21555
21556 static int pte_testbit(pte_t pte)
21557 {
21558 - return pte_flags(pte) & _PAGE_UNUSED1;
21559 + return pte_flags(pte) & _PAGE_CPA_TEST;
21560 }
21561
21562 struct split_state {
21563 diff -urNp linux-2.6.32.42/arch/x86/mm/pat.c linux-2.6.32.42/arch/x86/mm/pat.c
21564 --- linux-2.6.32.42/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
21565 +++ linux-2.6.32.42/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
21566 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
21567
21568 conflict:
21569 printk(KERN_INFO "%s:%d conflicting memory types "
21570 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
21571 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
21572 new->end, cattr_name(new->type), cattr_name(entry->type));
21573 return -EBUSY;
21574 }
21575 @@ -559,7 +559,7 @@ unlock_ret:
21576
21577 if (err) {
21578 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
21579 - current->comm, current->pid, start, end);
21580 + current->comm, task_pid_nr(current), start, end);
21581 }
21582
21583 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
21584 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
21585 while (cursor < to) {
21586 if (!devmem_is_allowed(pfn)) {
21587 printk(KERN_INFO
21588 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
21589 - current->comm, from, to);
21590 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
21591 + current->comm, from, to, cursor);
21592 return 0;
21593 }
21594 cursor += PAGE_SIZE;
21595 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
21596 printk(KERN_INFO
21597 "%s:%d ioremap_change_attr failed %s "
21598 "for %Lx-%Lx\n",
21599 - current->comm, current->pid,
21600 + current->comm, task_pid_nr(current),
21601 cattr_name(flags),
21602 base, (unsigned long long)(base + size));
21603 return -EINVAL;
21604 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
21605 free_memtype(paddr, paddr + size);
21606 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
21607 " for %Lx-%Lx, got %s\n",
21608 - current->comm, current->pid,
21609 + current->comm, task_pid_nr(current),
21610 cattr_name(want_flags),
21611 (unsigned long long)paddr,
21612 (unsigned long long)(paddr + size),
21613 diff -urNp linux-2.6.32.42/arch/x86/mm/pgtable_32.c linux-2.6.32.42/arch/x86/mm/pgtable_32.c
21614 --- linux-2.6.32.42/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
21615 +++ linux-2.6.32.42/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
21616 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
21617 return;
21618 }
21619 pte = pte_offset_kernel(pmd, vaddr);
21620 +
21621 + pax_open_kernel();
21622 if (pte_val(pteval))
21623 set_pte_at(&init_mm, vaddr, pte, pteval);
21624 else
21625 pte_clear(&init_mm, vaddr, pte);
21626 + pax_close_kernel();
21627
21628 /*
21629 * It's enough to flush this one mapping.
21630 diff -urNp linux-2.6.32.42/arch/x86/mm/pgtable.c linux-2.6.32.42/arch/x86/mm/pgtable.c
21631 --- linux-2.6.32.42/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
21632 +++ linux-2.6.32.42/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
21633 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
21634 list_del(&page->lru);
21635 }
21636
21637 -#define UNSHARED_PTRS_PER_PGD \
21638 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21639 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21640 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
21641
21642 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21643 +{
21644 + while (count--)
21645 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
21646 +}
21647 +#endif
21648 +
21649 +#ifdef CONFIG_PAX_PER_CPU_PGD
21650 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21651 +{
21652 + while (count--)
21653 +
21654 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21655 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
21656 +#else
21657 + *dst++ = *src++;
21658 +#endif
21659 +
21660 +}
21661 +#endif
21662 +
21663 +#ifdef CONFIG_X86_64
21664 +#define pxd_t pud_t
21665 +#define pyd_t pgd_t
21666 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
21667 +#define pxd_free(mm, pud) pud_free((mm), (pud))
21668 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
21669 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
21670 +#define PYD_SIZE PGDIR_SIZE
21671 +#else
21672 +#define pxd_t pmd_t
21673 +#define pyd_t pud_t
21674 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
21675 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
21676 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
21677 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
21678 +#define PYD_SIZE PUD_SIZE
21679 +#endif
21680 +
21681 +#ifdef CONFIG_PAX_PER_CPU_PGD
21682 +static inline void pgd_ctor(pgd_t *pgd) {}
21683 +static inline void pgd_dtor(pgd_t *pgd) {}
21684 +#else
21685 static void pgd_ctor(pgd_t *pgd)
21686 {
21687 /* If the pgd points to a shared pagetable level (either the
21688 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
21689 pgd_list_del(pgd);
21690 spin_unlock_irqrestore(&pgd_lock, flags);
21691 }
21692 +#endif
21693
21694 /*
21695 * List of all pgd's needed for non-PAE so it can invalidate entries
21696 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
21697 * -- wli
21698 */
21699
21700 -#ifdef CONFIG_X86_PAE
21701 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21702 /*
21703 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
21704 * updating the top-level pagetable entries to guarantee the
21705 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
21706 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
21707 * and initialize the kernel pmds here.
21708 */
21709 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
21710 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21711
21712 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
21713 {
21714 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
21715 */
21716 flush_tlb_mm(mm);
21717 }
21718 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
21719 +#define PREALLOCATED_PXDS USER_PGD_PTRS
21720 #else /* !CONFIG_X86_PAE */
21721
21722 /* No need to prepopulate any pagetable entries in non-PAE modes. */
21723 -#define PREALLOCATED_PMDS 0
21724 +#define PREALLOCATED_PXDS 0
21725
21726 #endif /* CONFIG_X86_PAE */
21727
21728 -static void free_pmds(pmd_t *pmds[])
21729 +static void free_pxds(pxd_t *pxds[])
21730 {
21731 int i;
21732
21733 - for(i = 0; i < PREALLOCATED_PMDS; i++)
21734 - if (pmds[i])
21735 - free_page((unsigned long)pmds[i]);
21736 + for(i = 0; i < PREALLOCATED_PXDS; i++)
21737 + if (pxds[i])
21738 + free_page((unsigned long)pxds[i]);
21739 }
21740
21741 -static int preallocate_pmds(pmd_t *pmds[])
21742 +static int preallocate_pxds(pxd_t *pxds[])
21743 {
21744 int i;
21745 bool failed = false;
21746
21747 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
21748 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21749 - if (pmd == NULL)
21750 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
21751 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21752 + if (pxd == NULL)
21753 failed = true;
21754 - pmds[i] = pmd;
21755 + pxds[i] = pxd;
21756 }
21757
21758 if (failed) {
21759 - free_pmds(pmds);
21760 + free_pxds(pxds);
21761 return -ENOMEM;
21762 }
21763
21764 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
21765 * preallocate which never got a corresponding vma will need to be
21766 * freed manually.
21767 */
21768 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21769 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21770 {
21771 int i;
21772
21773 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
21774 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
21775 pgd_t pgd = pgdp[i];
21776
21777 if (pgd_val(pgd) != 0) {
21778 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21779 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21780
21781 - pgdp[i] = native_make_pgd(0);
21782 + set_pgd(pgdp + i, native_make_pgd(0));
21783
21784 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21785 - pmd_free(mm, pmd);
21786 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21787 + pxd_free(mm, pxd);
21788 }
21789 }
21790 }
21791
21792 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21793 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21794 {
21795 - pud_t *pud;
21796 + pyd_t *pyd;
21797 unsigned long addr;
21798 int i;
21799
21800 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21801 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21802 return;
21803
21804 - pud = pud_offset(pgd, 0);
21805 +#ifdef CONFIG_X86_64
21806 + pyd = pyd_offset(mm, 0L);
21807 +#else
21808 + pyd = pyd_offset(pgd, 0L);
21809 +#endif
21810
21811 - for (addr = i = 0; i < PREALLOCATED_PMDS;
21812 - i++, pud++, addr += PUD_SIZE) {
21813 - pmd_t *pmd = pmds[i];
21814 + for (addr = i = 0; i < PREALLOCATED_PXDS;
21815 + i++, pyd++, addr += PYD_SIZE) {
21816 + pxd_t *pxd = pxds[i];
21817
21818 if (i >= KERNEL_PGD_BOUNDARY)
21819 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21820 - sizeof(pmd_t) * PTRS_PER_PMD);
21821 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21822 + sizeof(pxd_t) * PTRS_PER_PMD);
21823
21824 - pud_populate(mm, pud, pmd);
21825 + pyd_populate(mm, pyd, pxd);
21826 }
21827 }
21828
21829 pgd_t *pgd_alloc(struct mm_struct *mm)
21830 {
21831 pgd_t *pgd;
21832 - pmd_t *pmds[PREALLOCATED_PMDS];
21833 + pxd_t *pxds[PREALLOCATED_PXDS];
21834 +
21835 unsigned long flags;
21836
21837 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21838 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21839
21840 mm->pgd = pgd;
21841
21842 - if (preallocate_pmds(pmds) != 0)
21843 + if (preallocate_pxds(pxds) != 0)
21844 goto out_free_pgd;
21845
21846 if (paravirt_pgd_alloc(mm) != 0)
21847 - goto out_free_pmds;
21848 + goto out_free_pxds;
21849
21850 /*
21851 * Make sure that pre-populating the pmds is atomic with
21852 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21853 spin_lock_irqsave(&pgd_lock, flags);
21854
21855 pgd_ctor(pgd);
21856 - pgd_prepopulate_pmd(mm, pgd, pmds);
21857 + pgd_prepopulate_pxd(mm, pgd, pxds);
21858
21859 spin_unlock_irqrestore(&pgd_lock, flags);
21860
21861 return pgd;
21862
21863 -out_free_pmds:
21864 - free_pmds(pmds);
21865 +out_free_pxds:
21866 + free_pxds(pxds);
21867 out_free_pgd:
21868 free_page((unsigned long)pgd);
21869 out:
21870 @@ -287,7 +338,7 @@ out:
21871
21872 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
21873 {
21874 - pgd_mop_up_pmds(mm, pgd);
21875 + pgd_mop_up_pxds(mm, pgd);
21876 pgd_dtor(pgd);
21877 paravirt_pgd_free(mm, pgd);
21878 free_page((unsigned long)pgd);
21879 diff -urNp linux-2.6.32.42/arch/x86/mm/setup_nx.c linux-2.6.32.42/arch/x86/mm/setup_nx.c
21880 --- linux-2.6.32.42/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
21881 +++ linux-2.6.32.42/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
21882 @@ -4,11 +4,10 @@
21883
21884 #include <asm/pgtable.h>
21885
21886 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21887 int nx_enabled;
21888
21889 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21890 -static int disable_nx __cpuinitdata;
21891 -
21892 +#ifndef CONFIG_PAX_PAGEEXEC
21893 /*
21894 * noexec = on|off
21895 *
21896 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
21897 if (!str)
21898 return -EINVAL;
21899 if (!strncmp(str, "on", 2)) {
21900 - __supported_pte_mask |= _PAGE_NX;
21901 - disable_nx = 0;
21902 + nx_enabled = 1;
21903 } else if (!strncmp(str, "off", 3)) {
21904 - disable_nx = 1;
21905 - __supported_pte_mask &= ~_PAGE_NX;
21906 + nx_enabled = 0;
21907 }
21908 return 0;
21909 }
21910 early_param("noexec", noexec_setup);
21911 #endif
21912 +#endif
21913
21914 #ifdef CONFIG_X86_PAE
21915 void __init set_nx(void)
21916 {
21917 - unsigned int v[4], l, h;
21918 + if (!nx_enabled && cpu_has_nx) {
21919 + unsigned l, h;
21920
21921 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
21922 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
21923 -
21924 - if ((v[3] & (1 << 20)) && !disable_nx) {
21925 - rdmsr(MSR_EFER, l, h);
21926 - l |= EFER_NX;
21927 - wrmsr(MSR_EFER, l, h);
21928 - nx_enabled = 1;
21929 - __supported_pte_mask |= _PAGE_NX;
21930 - }
21931 + __supported_pte_mask &= ~_PAGE_NX;
21932 + rdmsr(MSR_EFER, l, h);
21933 + l &= ~EFER_NX;
21934 + wrmsr(MSR_EFER, l, h);
21935 }
21936 }
21937 #else
21938 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
21939 unsigned long efer;
21940
21941 rdmsrl(MSR_EFER, efer);
21942 - if (!(efer & EFER_NX) || disable_nx)
21943 + if (!(efer & EFER_NX) || !nx_enabled)
21944 __supported_pte_mask &= ~_PAGE_NX;
21945 }
21946 #endif
21947 diff -urNp linux-2.6.32.42/arch/x86/mm/tlb.c linux-2.6.32.42/arch/x86/mm/tlb.c
21948 --- linux-2.6.32.42/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
21949 +++ linux-2.6.32.42/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
21950 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
21951 BUG();
21952 cpumask_clear_cpu(cpu,
21953 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
21954 +
21955 +#ifndef CONFIG_PAX_PER_CPU_PGD
21956 load_cr3(swapper_pg_dir);
21957 +#endif
21958 +
21959 }
21960 EXPORT_SYMBOL_GPL(leave_mm);
21961
21962 diff -urNp linux-2.6.32.42/arch/x86/oprofile/backtrace.c linux-2.6.32.42/arch/x86/oprofile/backtrace.c
21963 --- linux-2.6.32.42/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
21964 +++ linux-2.6.32.42/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
21965 @@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
21966 struct frame_head bufhead[2];
21967
21968 /* Also check accessibility of one struct frame_head beyond */
21969 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
21970 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
21971 return NULL;
21972 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
21973 return NULL;
21974 @@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
21975 {
21976 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
21977
21978 - if (!user_mode_vm(regs)) {
21979 + if (!user_mode(regs)) {
21980 unsigned long stack = kernel_stack_pointer(regs);
21981 if (depth)
21982 dump_trace(NULL, regs, (unsigned long *)stack, 0,
21983 diff -urNp linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c
21984 --- linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
21985 +++ linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
21986 @@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
21987 #endif
21988 }
21989
21990 -static int inline addr_increment(void)
21991 +static inline int addr_increment(void)
21992 {
21993 #ifdef CONFIG_SMP
21994 return smp_num_siblings == 2 ? 2 : 1;
21995 diff -urNp linux-2.6.32.42/arch/x86/pci/common.c linux-2.6.32.42/arch/x86/pci/common.c
21996 --- linux-2.6.32.42/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
21997 +++ linux-2.6.32.42/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
21998 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
21999 int pcibios_last_bus = -1;
22000 unsigned long pirq_table_addr;
22001 struct pci_bus *pci_root_bus;
22002 -struct pci_raw_ops *raw_pci_ops;
22003 -struct pci_raw_ops *raw_pci_ext_ops;
22004 +const struct pci_raw_ops *raw_pci_ops;
22005 +const struct pci_raw_ops *raw_pci_ext_ops;
22006
22007 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
22008 int reg, int len, u32 *val)
22009 diff -urNp linux-2.6.32.42/arch/x86/pci/direct.c linux-2.6.32.42/arch/x86/pci/direct.c
22010 --- linux-2.6.32.42/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
22011 +++ linux-2.6.32.42/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
22012 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
22013
22014 #undef PCI_CONF1_ADDRESS
22015
22016 -struct pci_raw_ops pci_direct_conf1 = {
22017 +const struct pci_raw_ops pci_direct_conf1 = {
22018 .read = pci_conf1_read,
22019 .write = pci_conf1_write,
22020 };
22021 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
22022
22023 #undef PCI_CONF2_ADDRESS
22024
22025 -struct pci_raw_ops pci_direct_conf2 = {
22026 +const struct pci_raw_ops pci_direct_conf2 = {
22027 .read = pci_conf2_read,
22028 .write = pci_conf2_write,
22029 };
22030 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
22031 * This should be close to trivial, but it isn't, because there are buggy
22032 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
22033 */
22034 -static int __init pci_sanity_check(struct pci_raw_ops *o)
22035 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
22036 {
22037 u32 x = 0;
22038 int year, devfn;
22039 diff -urNp linux-2.6.32.42/arch/x86/pci/mmconfig_32.c linux-2.6.32.42/arch/x86/pci/mmconfig_32.c
22040 --- linux-2.6.32.42/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
22041 +++ linux-2.6.32.42/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
22042 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
22043 return 0;
22044 }
22045
22046 -static struct pci_raw_ops pci_mmcfg = {
22047 +static const struct pci_raw_ops pci_mmcfg = {
22048 .read = pci_mmcfg_read,
22049 .write = pci_mmcfg_write,
22050 };
22051 diff -urNp linux-2.6.32.42/arch/x86/pci/mmconfig_64.c linux-2.6.32.42/arch/x86/pci/mmconfig_64.c
22052 --- linux-2.6.32.42/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
22053 +++ linux-2.6.32.42/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
22054 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
22055 return 0;
22056 }
22057
22058 -static struct pci_raw_ops pci_mmcfg = {
22059 +static const struct pci_raw_ops pci_mmcfg = {
22060 .read = pci_mmcfg_read,
22061 .write = pci_mmcfg_write,
22062 };
22063 diff -urNp linux-2.6.32.42/arch/x86/pci/numaq_32.c linux-2.6.32.42/arch/x86/pci/numaq_32.c
22064 --- linux-2.6.32.42/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
22065 +++ linux-2.6.32.42/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
22066 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
22067
22068 #undef PCI_CONF1_MQ_ADDRESS
22069
22070 -static struct pci_raw_ops pci_direct_conf1_mq = {
22071 +static const struct pci_raw_ops pci_direct_conf1_mq = {
22072 .read = pci_conf1_mq_read,
22073 .write = pci_conf1_mq_write
22074 };
22075 diff -urNp linux-2.6.32.42/arch/x86/pci/olpc.c linux-2.6.32.42/arch/x86/pci/olpc.c
22076 --- linux-2.6.32.42/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
22077 +++ linux-2.6.32.42/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
22078 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
22079 return 0;
22080 }
22081
22082 -static struct pci_raw_ops pci_olpc_conf = {
22083 +static const struct pci_raw_ops pci_olpc_conf = {
22084 .read = pci_olpc_read,
22085 .write = pci_olpc_write,
22086 };
22087 diff -urNp linux-2.6.32.42/arch/x86/pci/pcbios.c linux-2.6.32.42/arch/x86/pci/pcbios.c
22088 --- linux-2.6.32.42/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
22089 +++ linux-2.6.32.42/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
22090 @@ -56,50 +56,93 @@ union bios32 {
22091 static struct {
22092 unsigned long address;
22093 unsigned short segment;
22094 -} bios32_indirect = { 0, __KERNEL_CS };
22095 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22096
22097 /*
22098 * Returns the entry point for the given service, NULL on error
22099 */
22100
22101 -static unsigned long bios32_service(unsigned long service)
22102 +static unsigned long __devinit bios32_service(unsigned long service)
22103 {
22104 unsigned char return_code; /* %al */
22105 unsigned long address; /* %ebx */
22106 unsigned long length; /* %ecx */
22107 unsigned long entry; /* %edx */
22108 unsigned long flags;
22109 + struct desc_struct d, *gdt;
22110
22111 local_irq_save(flags);
22112 - __asm__("lcall *(%%edi); cld"
22113 +
22114 + gdt = get_cpu_gdt_table(smp_processor_id());
22115 +
22116 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
22117 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22118 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
22119 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22120 +
22121 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
22122 : "=a" (return_code),
22123 "=b" (address),
22124 "=c" (length),
22125 "=d" (entry)
22126 : "0" (service),
22127 "1" (0),
22128 - "D" (&bios32_indirect));
22129 + "D" (&bios32_indirect),
22130 + "r"(__PCIBIOS_DS)
22131 + : "memory");
22132 +
22133 + pax_open_kernel();
22134 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
22135 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
22136 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
22137 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
22138 + pax_close_kernel();
22139 +
22140 local_irq_restore(flags);
22141
22142 switch (return_code) {
22143 - case 0:
22144 - return address + entry;
22145 - case 0x80: /* Not present */
22146 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22147 - return 0;
22148 - default: /* Shouldn't happen */
22149 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22150 - service, return_code);
22151 + case 0: {
22152 + int cpu;
22153 + unsigned char flags;
22154 +
22155 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
22156 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
22157 + printk(KERN_WARNING "bios32_service: not valid\n");
22158 return 0;
22159 + }
22160 + address = address + PAGE_OFFSET;
22161 + length += 16UL; /* some BIOSs underreport this... */
22162 + flags = 4;
22163 + if (length >= 64*1024*1024) {
22164 + length >>= PAGE_SHIFT;
22165 + flags |= 8;
22166 + }
22167 +
22168 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22169 + gdt = get_cpu_gdt_table(cpu);
22170 + pack_descriptor(&d, address, length, 0x9b, flags);
22171 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22172 + pack_descriptor(&d, address, length, 0x93, flags);
22173 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22174 + }
22175 + return entry;
22176 + }
22177 + case 0x80: /* Not present */
22178 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22179 + return 0;
22180 + default: /* Shouldn't happen */
22181 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22182 + service, return_code);
22183 + return 0;
22184 }
22185 }
22186
22187 static struct {
22188 unsigned long address;
22189 unsigned short segment;
22190 -} pci_indirect = { 0, __KERNEL_CS };
22191 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
22192
22193 -static int pci_bios_present;
22194 +static int pci_bios_present __read_only;
22195
22196 static int __devinit check_pcibios(void)
22197 {
22198 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
22199 unsigned long flags, pcibios_entry;
22200
22201 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
22202 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
22203 + pci_indirect.address = pcibios_entry;
22204
22205 local_irq_save(flags);
22206 - __asm__(
22207 - "lcall *(%%edi); cld\n\t"
22208 + __asm__("movw %w6, %%ds\n\t"
22209 + "lcall *%%ss:(%%edi); cld\n\t"
22210 + "push %%ss\n\t"
22211 + "pop %%ds\n\t"
22212 "jc 1f\n\t"
22213 "xor %%ah, %%ah\n"
22214 "1:"
22215 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
22216 "=b" (ebx),
22217 "=c" (ecx)
22218 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
22219 - "D" (&pci_indirect)
22220 + "D" (&pci_indirect),
22221 + "r" (__PCIBIOS_DS)
22222 : "memory");
22223 local_irq_restore(flags);
22224
22225 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
22226
22227 switch (len) {
22228 case 1:
22229 - __asm__("lcall *(%%esi); cld\n\t"
22230 + __asm__("movw %w6, %%ds\n\t"
22231 + "lcall *%%ss:(%%esi); cld\n\t"
22232 + "push %%ss\n\t"
22233 + "pop %%ds\n\t"
22234 "jc 1f\n\t"
22235 "xor %%ah, %%ah\n"
22236 "1:"
22237 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
22238 : "1" (PCIBIOS_READ_CONFIG_BYTE),
22239 "b" (bx),
22240 "D" ((long)reg),
22241 - "S" (&pci_indirect));
22242 + "S" (&pci_indirect),
22243 + "r" (__PCIBIOS_DS));
22244 /*
22245 * Zero-extend the result beyond 8 bits, do not trust the
22246 * BIOS having done it:
22247 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
22248 *value &= 0xff;
22249 break;
22250 case 2:
22251 - __asm__("lcall *(%%esi); cld\n\t"
22252 + __asm__("movw %w6, %%ds\n\t"
22253 + "lcall *%%ss:(%%esi); cld\n\t"
22254 + "push %%ss\n\t"
22255 + "pop %%ds\n\t"
22256 "jc 1f\n\t"
22257 "xor %%ah, %%ah\n"
22258 "1:"
22259 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
22260 : "1" (PCIBIOS_READ_CONFIG_WORD),
22261 "b" (bx),
22262 "D" ((long)reg),
22263 - "S" (&pci_indirect));
22264 + "S" (&pci_indirect),
22265 + "r" (__PCIBIOS_DS));
22266 /*
22267 * Zero-extend the result beyond 16 bits, do not trust the
22268 * BIOS having done it:
22269 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
22270 *value &= 0xffff;
22271 break;
22272 case 4:
22273 - __asm__("lcall *(%%esi); cld\n\t"
22274 + __asm__("movw %w6, %%ds\n\t"
22275 + "lcall *%%ss:(%%esi); cld\n\t"
22276 + "push %%ss\n\t"
22277 + "pop %%ds\n\t"
22278 "jc 1f\n\t"
22279 "xor %%ah, %%ah\n"
22280 "1:"
22281 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
22282 : "1" (PCIBIOS_READ_CONFIG_DWORD),
22283 "b" (bx),
22284 "D" ((long)reg),
22285 - "S" (&pci_indirect));
22286 + "S" (&pci_indirect),
22287 + "r" (__PCIBIOS_DS));
22288 break;
22289 }
22290
22291 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
22292
22293 switch (len) {
22294 case 1:
22295 - __asm__("lcall *(%%esi); cld\n\t"
22296 + __asm__("movw %w6, %%ds\n\t"
22297 + "lcall *%%ss:(%%esi); cld\n\t"
22298 + "push %%ss\n\t"
22299 + "pop %%ds\n\t"
22300 "jc 1f\n\t"
22301 "xor %%ah, %%ah\n"
22302 "1:"
22303 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
22304 "c" (value),
22305 "b" (bx),
22306 "D" ((long)reg),
22307 - "S" (&pci_indirect));
22308 + "S" (&pci_indirect),
22309 + "r" (__PCIBIOS_DS));
22310 break;
22311 case 2:
22312 - __asm__("lcall *(%%esi); cld\n\t"
22313 + __asm__("movw %w6, %%ds\n\t"
22314 + "lcall *%%ss:(%%esi); cld\n\t"
22315 + "push %%ss\n\t"
22316 + "pop %%ds\n\t"
22317 "jc 1f\n\t"
22318 "xor %%ah, %%ah\n"
22319 "1:"
22320 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
22321 "c" (value),
22322 "b" (bx),
22323 "D" ((long)reg),
22324 - "S" (&pci_indirect));
22325 + "S" (&pci_indirect),
22326 + "r" (__PCIBIOS_DS));
22327 break;
22328 case 4:
22329 - __asm__("lcall *(%%esi); cld\n\t"
22330 + __asm__("movw %w6, %%ds\n\t"
22331 + "lcall *%%ss:(%%esi); cld\n\t"
22332 + "push %%ss\n\t"
22333 + "pop %%ds\n\t"
22334 "jc 1f\n\t"
22335 "xor %%ah, %%ah\n"
22336 "1:"
22337 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
22338 "c" (value),
22339 "b" (bx),
22340 "D" ((long)reg),
22341 - "S" (&pci_indirect));
22342 + "S" (&pci_indirect),
22343 + "r" (__PCIBIOS_DS));
22344 break;
22345 }
22346
22347 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
22348 * Function table for BIOS32 access
22349 */
22350
22351 -static struct pci_raw_ops pci_bios_access = {
22352 +static const struct pci_raw_ops pci_bios_access = {
22353 .read = pci_bios_read,
22354 .write = pci_bios_write
22355 };
22356 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
22357 * Try to find PCI BIOS.
22358 */
22359
22360 -static struct pci_raw_ops * __devinit pci_find_bios(void)
22361 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
22362 {
22363 union bios32 *check;
22364 unsigned char sum;
22365 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
22366
22367 DBG("PCI: Fetching IRQ routing table... ");
22368 __asm__("push %%es\n\t"
22369 + "movw %w8, %%ds\n\t"
22370 "push %%ds\n\t"
22371 "pop %%es\n\t"
22372 - "lcall *(%%esi); cld\n\t"
22373 + "lcall *%%ss:(%%esi); cld\n\t"
22374 "pop %%es\n\t"
22375 + "push %%ss\n\t"
22376 + "pop %%ds\n"
22377 "jc 1f\n\t"
22378 "xor %%ah, %%ah\n"
22379 "1:"
22380 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
22381 "1" (0),
22382 "D" ((long) &opt),
22383 "S" (&pci_indirect),
22384 - "m" (opt)
22385 + "m" (opt),
22386 + "r" (__PCIBIOS_DS)
22387 : "memory");
22388 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
22389 if (ret & 0xff00)
22390 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
22391 {
22392 int ret;
22393
22394 - __asm__("lcall *(%%esi); cld\n\t"
22395 + __asm__("movw %w5, %%ds\n\t"
22396 + "lcall *%%ss:(%%esi); cld\n\t"
22397 + "push %%ss\n\t"
22398 + "pop %%ds\n"
22399 "jc 1f\n\t"
22400 "xor %%ah, %%ah\n"
22401 "1:"
22402 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
22403 : "0" (PCIBIOS_SET_PCI_HW_INT),
22404 "b" ((dev->bus->number << 8) | dev->devfn),
22405 "c" ((irq << 8) | (pin + 10)),
22406 - "S" (&pci_indirect));
22407 + "S" (&pci_indirect),
22408 + "r" (__PCIBIOS_DS));
22409 return !(ret & 0xff00);
22410 }
22411 EXPORT_SYMBOL(pcibios_set_irq_routing);
22412 diff -urNp linux-2.6.32.42/arch/x86/power/cpu.c linux-2.6.32.42/arch/x86/power/cpu.c
22413 --- linux-2.6.32.42/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
22414 +++ linux-2.6.32.42/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
22415 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
22416 static void fix_processor_context(void)
22417 {
22418 int cpu = smp_processor_id();
22419 - struct tss_struct *t = &per_cpu(init_tss, cpu);
22420 + struct tss_struct *t = init_tss + cpu;
22421
22422 set_tss_desc(cpu, t); /*
22423 * This just modifies memory; should not be
22424 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
22425 */
22426
22427 #ifdef CONFIG_X86_64
22428 + pax_open_kernel();
22429 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22430 + pax_close_kernel();
22431
22432 syscall_init(); /* This sets MSR_*STAR and related */
22433 #endif
22434 diff -urNp linux-2.6.32.42/arch/x86/vdso/Makefile linux-2.6.32.42/arch/x86/vdso/Makefile
22435 --- linux-2.6.32.42/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
22436 +++ linux-2.6.32.42/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
22437 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
22438 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
22439 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
22440
22441 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22442 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22443 GCOV_PROFILE := n
22444
22445 #
22446 diff -urNp linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c
22447 --- linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
22448 +++ linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
22449 @@ -22,24 +22,48 @@
22450 #include <asm/hpet.h>
22451 #include <asm/unistd.h>
22452 #include <asm/io.h>
22453 +#include <asm/fixmap.h>
22454 #include "vextern.h"
22455
22456 #define gtod vdso_vsyscall_gtod_data
22457
22458 +notrace noinline long __vdso_fallback_time(long *t)
22459 +{
22460 + long secs;
22461 + asm volatile("syscall"
22462 + : "=a" (secs)
22463 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
22464 + return secs;
22465 +}
22466 +
22467 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
22468 {
22469 long ret;
22470 asm("syscall" : "=a" (ret) :
22471 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
22472 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
22473 return ret;
22474 }
22475
22476 +notrace static inline cycle_t __vdso_vread_hpet(void)
22477 +{
22478 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
22479 +}
22480 +
22481 +notrace static inline cycle_t __vdso_vread_tsc(void)
22482 +{
22483 + cycle_t ret = (cycle_t)vget_cycles();
22484 +
22485 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
22486 +}
22487 +
22488 notrace static inline long vgetns(void)
22489 {
22490 long v;
22491 - cycles_t (*vread)(void);
22492 - vread = gtod->clock.vread;
22493 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
22494 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
22495 + v = __vdso_vread_tsc();
22496 + else
22497 + v = __vdso_vread_hpet();
22498 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
22499 return (v * gtod->clock.mult) >> gtod->clock.shift;
22500 }
22501
22502 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
22503
22504 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
22505 {
22506 - if (likely(gtod->sysctl_enabled))
22507 + if (likely(gtod->sysctl_enabled &&
22508 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
22509 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
22510 switch (clock) {
22511 case CLOCK_REALTIME:
22512 if (likely(gtod->clock.vread))
22513 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
22514 int clock_gettime(clockid_t, struct timespec *)
22515 __attribute__((weak, alias("__vdso_clock_gettime")));
22516
22517 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
22518 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
22519 {
22520 long ret;
22521 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
22522 + asm("syscall" : "=a" (ret) :
22523 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
22524 + return ret;
22525 +}
22526 +
22527 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
22528 +{
22529 + if (likely(gtod->sysctl_enabled &&
22530 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
22531 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
22532 + {
22533 if (likely(tv != NULL)) {
22534 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
22535 offsetof(struct timespec, tv_nsec) ||
22536 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
22537 }
22538 return 0;
22539 }
22540 - asm("syscall" : "=a" (ret) :
22541 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
22542 - return ret;
22543 + return __vdso_fallback_gettimeofday(tv, tz);
22544 }
22545 int gettimeofday(struct timeval *, struct timezone *)
22546 __attribute__((weak, alias("__vdso_gettimeofday")));
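The vDSO hunks above route the fallbacks through raw SYSCALL instructions and add "r11" and "cx" to the clobber lists, because the x86-64 SYSCALL instruction overwrites %rcx and %r11. Below is a minimal user-space sketch of the same clobber discipline; it assumes an x86-64 Linux target with gcc-style inline asm, and __NR_time (201) is used purely for illustration.

#include <stdio.h>

/* SYSCALL clobbers %rcx (return RIP) and %r11 (saved RFLAGS), so both
 * registers are listed as clobbers, matching the vDSO fallback asm above. */
static long raw_syscall1(long nr, long arg0)
{
	long ret;
	asm volatile("syscall"
		     : "=a" (ret)
		     : "0" (nr), "D" (arg0)
		     : "rcx", "r11", "memory");
	return ret;
}

int main(void)
{
	long secs = raw_syscall1(201 /* __NR_time on x86-64 */, 0);
	printf("seconds since epoch: %ld\n", secs);
	return 0;
}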
22547 diff -urNp linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c
22548 --- linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
22549 +++ linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
22550 @@ -25,6 +25,7 @@
22551 #include <asm/tlbflush.h>
22552 #include <asm/vdso.h>
22553 #include <asm/proto.h>
22554 +#include <asm/mman.h>
22555
22556 enum {
22557 VDSO_DISABLED = 0,
22558 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
22559 void enable_sep_cpu(void)
22560 {
22561 int cpu = get_cpu();
22562 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
22563 + struct tss_struct *tss = init_tss + cpu;
22564
22565 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22566 put_cpu();
22567 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
22568 gate_vma.vm_start = FIXADDR_USER_START;
22569 gate_vma.vm_end = FIXADDR_USER_END;
22570 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
22571 - gate_vma.vm_page_prot = __P101;
22572 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
22573 /*
22574 * Make sure the vDSO gets into every core dump.
22575 * Dumping its contents makes post-mortem fully interpretable later
22576 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
22577 if (compat)
22578 addr = VDSO_HIGH_BASE;
22579 else {
22580 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
22581 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
22582 if (IS_ERR_VALUE(addr)) {
22583 ret = addr;
22584 goto up_fail;
22585 }
22586 }
22587
22588 - current->mm->context.vdso = (void *)addr;
22589 + current->mm->context.vdso = addr;
22590
22591 if (compat_uses_vma || !compat) {
22592 /*
22593 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
22594 }
22595
22596 current_thread_info()->sysenter_return =
22597 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22598 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22599
22600 up_fail:
22601 if (ret)
22602 - current->mm->context.vdso = NULL;
22603 + current->mm->context.vdso = 0;
22604
22605 up_write(&mm->mmap_sem);
22606
22607 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
22608
22609 const char *arch_vma_name(struct vm_area_struct *vma)
22610 {
22611 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22612 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22613 return "[vdso]";
22614 +
22615 +#ifdef CONFIG_PAX_SEGMEXEC
22616 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
22617 + return "[vdso]";
22618 +#endif
22619 +
22620 return NULL;
22621 }
22622
22623 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
22624 struct mm_struct *mm = tsk->mm;
22625
22626 /* Check to see if this task was created in compat vdso mode */
22627 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
22628 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
22629 return &gate_vma;
22630 return NULL;
22631 }
22632 diff -urNp linux-2.6.32.42/arch/x86/vdso/vdso.lds.S linux-2.6.32.42/arch/x86/vdso/vdso.lds.S
22633 --- linux-2.6.32.42/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
22634 +++ linux-2.6.32.42/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
22635 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
22636 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
22637 #include "vextern.h"
22638 #undef VEXTERN
22639 +
22640 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
22641 +VEXTERN(fallback_gettimeofday)
22642 +VEXTERN(fallback_time)
22643 +VEXTERN(getcpu)
22644 +#undef VEXTERN
22645 diff -urNp linux-2.6.32.42/arch/x86/vdso/vextern.h linux-2.6.32.42/arch/x86/vdso/vextern.h
22646 --- linux-2.6.32.42/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
22647 +++ linux-2.6.32.42/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
22648 @@ -11,6 +11,5 @@
22649 put into vextern.h and be referenced as a pointer with vdso prefix.
22650 The main kernel later fills in the values. */
22651
22652 -VEXTERN(jiffies)
22653 VEXTERN(vgetcpu_mode)
22654 VEXTERN(vsyscall_gtod_data)
22655 diff -urNp linux-2.6.32.42/arch/x86/vdso/vma.c linux-2.6.32.42/arch/x86/vdso/vma.c
22656 --- linux-2.6.32.42/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
22657 +++ linux-2.6.32.42/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
22658 @@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
22659 if (!vbase)
22660 goto oom;
22661
22662 - if (memcmp(vbase, "\177ELF", 4)) {
22663 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
22664 printk("VDSO: I'm broken; not ELF\n");
22665 vdso_enabled = 0;
22666 }
22667 @@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
22668 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
22669 #include "vextern.h"
22670 #undef VEXTERN
22671 + vunmap(vbase);
22672 return 0;
22673
22674 oom:
22675 @@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
22676 goto up_fail;
22677 }
22678
22679 - current->mm->context.vdso = (void *)addr;
22680 + current->mm->context.vdso = addr;
22681
22682 ret = install_special_mapping(mm, addr, vdso_size,
22683 VM_READ|VM_EXEC|
22684 @@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
22685 VM_ALWAYSDUMP,
22686 vdso_pages);
22687 if (ret) {
22688 - current->mm->context.vdso = NULL;
22689 + current->mm->context.vdso = 0;
22690 goto up_fail;
22691 }
22692
22693 @@ -132,10 +133,3 @@ up_fail:
22694 up_write(&mm->mmap_sem);
22695 return ret;
22696 }
22697 -
22698 -static __init int vdso_setup(char *s)
22699 -{
22700 - vdso_enabled = simple_strtoul(s, NULL, 0);
22701 - return 0;
22702 -}
22703 -__setup("vdso=", vdso_setup);
22704 diff -urNp linux-2.6.32.42/arch/x86/xen/enlighten.c linux-2.6.32.42/arch/x86/xen/enlighten.c
22705 --- linux-2.6.32.42/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
22706 +++ linux-2.6.32.42/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
22707 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22708
22709 struct shared_info xen_dummy_shared_info;
22710
22711 -void *xen_initial_gdt;
22712 -
22713 /*
22714 * Point at some empty memory to start with. We map the real shared_info
22715 * page as soon as fixmap is up and running.
22716 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
22717
22718 preempt_disable();
22719
22720 - start = __get_cpu_var(idt_desc).address;
22721 + start = (unsigned long)__get_cpu_var(idt_desc).address;
22722 end = start + __get_cpu_var(idt_desc).size + 1;
22723
22724 xen_mc_flush();
22725 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
22726 #endif
22727 };
22728
22729 -static void xen_reboot(int reason)
22730 +static __noreturn void xen_reboot(int reason)
22731 {
22732 struct sched_shutdown r = { .reason = reason };
22733
22734 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
22735 BUG();
22736 }
22737
22738 -static void xen_restart(char *msg)
22739 +static __noreturn void xen_restart(char *msg)
22740 {
22741 xen_reboot(SHUTDOWN_reboot);
22742 }
22743
22744 -static void xen_emergency_restart(void)
22745 +static __noreturn void xen_emergency_restart(void)
22746 {
22747 xen_reboot(SHUTDOWN_reboot);
22748 }
22749
22750 -static void xen_machine_halt(void)
22751 +static __noreturn void xen_machine_halt(void)
22752 {
22753 xen_reboot(SHUTDOWN_poweroff);
22754 }
22755 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
22756 */
22757 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
22758
22759 -#ifdef CONFIG_X86_64
22760 /* Work out if we support NX */
22761 - check_efer();
22762 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22763 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
22764 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
22765 + unsigned l, h;
22766 +
22767 +#ifdef CONFIG_X86_PAE
22768 + nx_enabled = 1;
22769 +#endif
22770 + __supported_pte_mask |= _PAGE_NX;
22771 + rdmsr(MSR_EFER, l, h);
22772 + l |= EFER_NX;
22773 + wrmsr(MSR_EFER, l, h);
22774 + }
22775 #endif
22776
22777 xen_setup_features();
22778 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
22779
22780 machine_ops = xen_machine_ops;
22781
22782 - /*
22783 - * The only reliable way to retain the initial address of the
22784 - * percpu gdt_page is to remember it here, so we can go and
22785 - * mark it RW later, when the initial percpu area is freed.
22786 - */
22787 - xen_initial_gdt = &per_cpu(gdt_page, 0);
22788 -
22789 xen_smp_init();
22790
22791 pgd = (pgd_t *)xen_start_info->pt_base;
22792 diff -urNp linux-2.6.32.42/arch/x86/xen/mmu.c linux-2.6.32.42/arch/x86/xen/mmu.c
22793 --- linux-2.6.32.42/arch/x86/xen/mmu.c 2011-06-25 12:55:34.000000000 -0400
22794 +++ linux-2.6.32.42/arch/x86/xen/mmu.c 2011-06-25 12:56:37.000000000 -0400
22795 @@ -1714,6 +1714,8 @@ __init pgd_t *xen_setup_kernel_pagetable
22796 convert_pfn_mfn(init_level4_pgt);
22797 convert_pfn_mfn(level3_ident_pgt);
22798 convert_pfn_mfn(level3_kernel_pgt);
22799 + convert_pfn_mfn(level3_vmalloc_pgt);
22800 + convert_pfn_mfn(level3_vmemmap_pgt);
22801
22802 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
22803 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
22804 @@ -1732,7 +1734,10 @@ __init pgd_t *xen_setup_kernel_pagetable
22805 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
22806 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
22807 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
22808 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
22809 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
22810 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
22811 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
22812 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
22813 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
22814
22815 diff -urNp linux-2.6.32.42/arch/x86/xen/smp.c linux-2.6.32.42/arch/x86/xen/smp.c
22816 --- linux-2.6.32.42/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
22817 +++ linux-2.6.32.42/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
22818 @@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
22819 {
22820 BUG_ON(smp_processor_id() != 0);
22821 native_smp_prepare_boot_cpu();
22822 -
22823 - /* We've switched to the "real" per-cpu gdt, so make sure the
22824 - old memory can be recycled */
22825 - make_lowmem_page_readwrite(xen_initial_gdt);
22826 -
22827 xen_setup_vcpu_info_placement();
22828 }
22829
22830 @@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
22831 gdt = get_cpu_gdt_table(cpu);
22832
22833 ctxt->flags = VGCF_IN_KERNEL;
22834 - ctxt->user_regs.ds = __USER_DS;
22835 - ctxt->user_regs.es = __USER_DS;
22836 + ctxt->user_regs.ds = __KERNEL_DS;
22837 + ctxt->user_regs.es = __KERNEL_DS;
22838 ctxt->user_regs.ss = __KERNEL_DS;
22839 #ifdef CONFIG_X86_32
22840 ctxt->user_regs.fs = __KERNEL_PERCPU;
22841 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
22842 + savesegment(gs, ctxt->user_regs.gs);
22843 #else
22844 ctxt->gs_base_kernel = per_cpu_offset(cpu);
22845 #endif
22846 @@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
22847 int rc;
22848
22849 per_cpu(current_task, cpu) = idle;
22850 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
22851 #ifdef CONFIG_X86_32
22852 irq_ctx_init(cpu);
22853 #else
22854 clear_tsk_thread_flag(idle, TIF_FORK);
22855 - per_cpu(kernel_stack, cpu) =
22856 - (unsigned long)task_stack_page(idle) -
22857 - KERNEL_STACK_OFFSET + THREAD_SIZE;
22858 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22859 #endif
22860 xen_setup_runstate_info(cpu);
22861 xen_setup_timer(cpu);
22862 diff -urNp linux-2.6.32.42/arch/x86/xen/xen-asm_32.S linux-2.6.32.42/arch/x86/xen/xen-asm_32.S
22863 --- linux-2.6.32.42/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
22864 +++ linux-2.6.32.42/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
22865 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
22866 ESP_OFFSET=4 # bytes pushed onto stack
22867
22868 /*
22869 - * Store vcpu_info pointer for easy access. Do it this way to
22870 - * avoid having to reload %fs
22871 + * Store vcpu_info pointer for easy access.
22872 */
22873 #ifdef CONFIG_SMP
22874 - GET_THREAD_INFO(%eax)
22875 - movl TI_cpu(%eax), %eax
22876 - movl __per_cpu_offset(,%eax,4), %eax
22877 - mov per_cpu__xen_vcpu(%eax), %eax
22878 + push %fs
22879 + mov $(__KERNEL_PERCPU), %eax
22880 + mov %eax, %fs
22881 + mov PER_CPU_VAR(xen_vcpu), %eax
22882 + pop %fs
22883 #else
22884 movl per_cpu__xen_vcpu, %eax
22885 #endif
22886 diff -urNp linux-2.6.32.42/arch/x86/xen/xen-head.S linux-2.6.32.42/arch/x86/xen/xen-head.S
22887 --- linux-2.6.32.42/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
22888 +++ linux-2.6.32.42/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
22889 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
22890 #ifdef CONFIG_X86_32
22891 mov %esi,xen_start_info
22892 mov $init_thread_union+THREAD_SIZE,%esp
22893 +#ifdef CONFIG_SMP
22894 + movl $cpu_gdt_table,%edi
22895 + movl $__per_cpu_load,%eax
22896 + movw %ax,__KERNEL_PERCPU + 2(%edi)
22897 + rorl $16,%eax
22898 + movb %al,__KERNEL_PERCPU + 4(%edi)
22899 + movb %ah,__KERNEL_PERCPU + 7(%edi)
22900 + movl $__per_cpu_end - 1,%eax
22901 + subl $__per_cpu_start,%eax
22902 + movw %ax,__KERNEL_PERCPU + 0(%edi)
22903 +#endif
22904 #else
22905 mov %rsi,xen_start_info
22906 mov $init_thread_union+THREAD_SIZE,%rsp
22907 diff -urNp linux-2.6.32.42/arch/x86/xen/xen-ops.h linux-2.6.32.42/arch/x86/xen/xen-ops.h
22908 --- linux-2.6.32.42/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
22909 +++ linux-2.6.32.42/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
22910 @@ -10,8 +10,6 @@
22911 extern const char xen_hypervisor_callback[];
22912 extern const char xen_failsafe_callback[];
22913
22914 -extern void *xen_initial_gdt;
22915 -
22916 struct trap_info;
22917 void xen_copy_trap_info(struct trap_info *traps);
22918
22919 diff -urNp linux-2.6.32.42/block/blk-integrity.c linux-2.6.32.42/block/blk-integrity.c
22920 --- linux-2.6.32.42/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
22921 +++ linux-2.6.32.42/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
22922 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
22923 NULL,
22924 };
22925
22926 -static struct sysfs_ops integrity_ops = {
22927 +static const struct sysfs_ops integrity_ops = {
22928 .show = &integrity_attr_show,
22929 .store = &integrity_attr_store,
22930 };
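This hunk, like the blk-sysfs.c, elevator.c and libata changes that follow, marks method tables const so their function pointers land in read-only data rather than writable memory. A small self-contained sketch of the pattern, with made-up names that are not taken from the kernel:

#include <stdio.h>

struct demo_sysfs_ops {
	int (*show)(const char *attr);
	int (*store)(const char *attr);
};

static int demo_show(const char *attr)  { return printf("show %s\n", attr); }
static int demo_store(const char *attr) { return printf("store %s\n", attr); }

/* const places the pointer table in .rodata, so it cannot be rewritten
 * at run time the way a writable table of function pointers could be. */
static const struct demo_sysfs_ops demo_ops = {
	.show  = demo_show,
	.store = demo_store,
};

int main(void)
{
	demo_ops.show("scheduler");
	demo_ops.store("scheduler");
	return 0;
}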
22931 diff -urNp linux-2.6.32.42/block/blk-iopoll.c linux-2.6.32.42/block/blk-iopoll.c
22932 --- linux-2.6.32.42/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
22933 +++ linux-2.6.32.42/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
22934 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
22935 }
22936 EXPORT_SYMBOL(blk_iopoll_complete);
22937
22938 -static void blk_iopoll_softirq(struct softirq_action *h)
22939 +static void blk_iopoll_softirq(void)
22940 {
22941 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
22942 int rearm = 0, budget = blk_iopoll_budget;
22943 diff -urNp linux-2.6.32.42/block/blk-map.c linux-2.6.32.42/block/blk-map.c
22944 --- linux-2.6.32.42/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
22945 +++ linux-2.6.32.42/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
22946 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
22947 * direct dma. else, set up kernel bounce buffers
22948 */
22949 uaddr = (unsigned long) ubuf;
22950 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
22951 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
22952 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
22953 else
22954 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
22955 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
22956 for (i = 0; i < iov_count; i++) {
22957 unsigned long uaddr = (unsigned long)iov[i].iov_base;
22958
22959 + if (!iov[i].iov_len)
22960 + return -EINVAL;
22961 +
22962 if (uaddr & queue_dma_alignment(q)) {
22963 unaligned = 1;
22964 break;
22965 }
22966 - if (!iov[i].iov_len)
22967 - return -EINVAL;
22968 }
22969
22970 if (unaligned || (q->dma_pad_mask & len) || map_data)
22971 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
22972 if (!len || !kbuf)
22973 return -EINVAL;
22974
22975 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
22976 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
22977 if (do_copy)
22978 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
22979 else
22980 diff -urNp linux-2.6.32.42/block/blk-softirq.c linux-2.6.32.42/block/blk-softirq.c
22981 --- linux-2.6.32.42/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
22982 +++ linux-2.6.32.42/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
22983 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
22984 * Softirq action handler - move entries to local list and loop over them
22985 * while passing them to the queue registered handler.
22986 */
22987 -static void blk_done_softirq(struct softirq_action *h)
22988 +static void blk_done_softirq(void)
22989 {
22990 struct list_head *cpu_list, local_list;
22991
22992 diff -urNp linux-2.6.32.42/block/blk-sysfs.c linux-2.6.32.42/block/blk-sysfs.c
22993 --- linux-2.6.32.42/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
22994 +++ linux-2.6.32.42/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
22995 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
22996 kmem_cache_free(blk_requestq_cachep, q);
22997 }
22998
22999 -static struct sysfs_ops queue_sysfs_ops = {
23000 +static const struct sysfs_ops queue_sysfs_ops = {
23001 .show = queue_attr_show,
23002 .store = queue_attr_store,
23003 };
23004 diff -urNp linux-2.6.32.42/block/bsg.c linux-2.6.32.42/block/bsg.c
23005 --- linux-2.6.32.42/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
23006 +++ linux-2.6.32.42/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
23007 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23008 struct sg_io_v4 *hdr, struct bsg_device *bd,
23009 fmode_t has_write_perm)
23010 {
23011 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23012 + unsigned char *cmdptr;
23013 +
23014 if (hdr->request_len > BLK_MAX_CDB) {
23015 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23016 if (!rq->cmd)
23017 return -ENOMEM;
23018 - }
23019 + cmdptr = rq->cmd;
23020 + } else
23021 + cmdptr = tmpcmd;
23022
23023 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
23024 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
23025 hdr->request_len))
23026 return -EFAULT;
23027
23028 + if (cmdptr != rq->cmd)
23029 + memcpy(rq->cmd, cmdptr, hdr->request_len);
23030 +
23031 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23032 if (blk_verify_command(rq->cmd, has_write_perm))
23033 return -EPERM;
23034 diff -urNp linux-2.6.32.42/block/elevator.c linux-2.6.32.42/block/elevator.c
23035 --- linux-2.6.32.42/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
23036 +++ linux-2.6.32.42/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
23037 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
23038 return error;
23039 }
23040
23041 -static struct sysfs_ops elv_sysfs_ops = {
23042 +static const struct sysfs_ops elv_sysfs_ops = {
23043 .show = elv_attr_show,
23044 .store = elv_attr_store,
23045 };
23046 diff -urNp linux-2.6.32.42/block/scsi_ioctl.c linux-2.6.32.42/block/scsi_ioctl.c
23047 --- linux-2.6.32.42/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
23048 +++ linux-2.6.32.42/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
23049 @@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
23050 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23051 struct sg_io_hdr *hdr, fmode_t mode)
23052 {
23053 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23054 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23055 + unsigned char *cmdptr;
23056 +
23057 + if (rq->cmd != rq->__cmd)
23058 + cmdptr = rq->cmd;
23059 + else
23060 + cmdptr = tmpcmd;
23061 +
23062 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23063 return -EFAULT;
23064 +
23065 + if (cmdptr != rq->cmd)
23066 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23067 +
23068 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23069 return -EPERM;
23070
23071 @@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
23072 int err;
23073 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23074 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23075 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23076 + unsigned char *cmdptr;
23077
23078 if (!sic)
23079 return -EINVAL;
23080 @@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
23081 */
23082 err = -EFAULT;
23083 rq->cmd_len = cmdlen;
23084 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
23085 +
23086 + if (rq->cmd != rq->__cmd)
23087 + cmdptr = rq->cmd;
23088 + else
23089 + cmdptr = tmpcmd;
23090 +
23091 + if (copy_from_user(cmdptr, sic->data, cmdlen))
23092 goto error;
23093
23094 + if (rq->cmd != cmdptr)
23095 + memcpy(rq->cmd, cmdptr, cmdlen);
23096 +
23097 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23098 goto error;
23099
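The bsg.c and scsi_ioctl.c hunks above share one pattern: user-supplied command bytes are copied into a bounded temporary first and only written into rq->cmd once the whole copy has succeeded. The sketch below is a user-space rendering of that pattern; copy_from_user is stood in for by memcpy, and the 16-byte inline command size is an assumption for the example, not taken from the patch.

#include <stdio.h>
#include <string.h>

#define CDB_INLINE_LEN 16	/* assumed inline command size (plays the role of rq->__cmd) */

/* stand-in for copy_from_user(): returns 0 on success */
static int copy_from_user_stub(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/* Fill cmd[] from user data through a bounded temporary, as in the hunks above. */
static int fill_cmd(unsigned char *cmd, size_t cmd_buf_len,
		    const unsigned char *user_cmd, size_t cmd_len)
{
	unsigned char tmpcmd[CDB_INLINE_LEN];
	unsigned char *cmdptr;

	if (cmd_len > cmd_buf_len)
		return -1;				/* would overflow the destination */

	/* commands that fit inline go through the stack temporary first */
	cmdptr = (cmd_len <= sizeof(tmpcmd)) ? tmpcmd : cmd;

	if (copy_from_user_stub(cmdptr, user_cmd, cmd_len))
		return -1;

	if (cmdptr != cmd)
		memcpy(cmd, cmdptr, cmd_len);		/* commit only after a full, successful copy */
	return 0;
}

int main(void)
{
	unsigned char cmd[CDB_INLINE_LEN] = {0};
	const unsigned char inquiry[6] = {0x12, 0, 0, 0, 36, 0};

	if (fill_cmd(cmd, sizeof(cmd), inquiry, sizeof(inquiry)) == 0)
		printf("opcode 0x%02x copied\n", cmd[0]);
	return 0;
}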
23100 diff -urNp linux-2.6.32.42/crypto/serpent.c linux-2.6.32.42/crypto/serpent.c
23101 --- linux-2.6.32.42/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
23102 +++ linux-2.6.32.42/crypto/serpent.c 2011-05-16 21:46:57.000000000 -0400
23103 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
23104 u32 r0,r1,r2,r3,r4;
23105 int i;
23106
23107 + pax_track_stack();
23108 +
23109 /* Copy key, add padding */
23110
23111 for (i = 0; i < keylen; ++i)
23112 diff -urNp linux-2.6.32.42/Documentation/dontdiff linux-2.6.32.42/Documentation/dontdiff
23113 --- linux-2.6.32.42/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
23114 +++ linux-2.6.32.42/Documentation/dontdiff 2011-05-18 20:09:36.000000000 -0400
23115 @@ -1,13 +1,16 @@
23116 *.a
23117 *.aux
23118 *.bin
23119 +*.cis
23120 *.cpio
23121 *.csp
23122 +*.dbg
23123 *.dsp
23124 *.dvi
23125 *.elf
23126 *.eps
23127 *.fw
23128 +*.gcno
23129 *.gen.S
23130 *.gif
23131 *.grep
23132 @@ -38,8 +41,10 @@
23133 *.tab.h
23134 *.tex
23135 *.ver
23136 +*.vim
23137 *.xml
23138 *_MODULES
23139 +*_reg_safe.h
23140 *_vga16.c
23141 *~
23142 *.9
23143 @@ -49,11 +54,16 @@
23144 53c700_d.h
23145 CVS
23146 ChangeSet
23147 +GPATH
23148 +GRTAGS
23149 +GSYMS
23150 +GTAGS
23151 Image
23152 Kerntypes
23153 Module.markers
23154 Module.symvers
23155 PENDING
23156 +PERF*
23157 SCCS
23158 System.map*
23159 TAGS
23160 @@ -76,7 +86,11 @@ btfixupprep
23161 build
23162 bvmlinux
23163 bzImage*
23164 +capability_names.h
23165 +capflags.c
23166 classlist.h*
23167 +clut_vga16.c
23168 +common-cmds.h
23169 comp*.log
23170 compile.h*
23171 conf
23172 @@ -103,13 +117,14 @@ gen_crc32table
23173 gen_init_cpio
23174 genksyms
23175 *_gray256.c
23176 +hash
23177 ihex2fw
23178 ikconfig.h*
23179 initramfs_data.cpio
23180 +initramfs_data.cpio.bz2
23181 initramfs_data.cpio.gz
23182 initramfs_list
23183 kallsyms
23184 -kconfig
23185 keywords.c
23186 ksym.c*
23187 ksym.h*
23188 @@ -133,7 +148,9 @@ mkboot
23189 mkbugboot
23190 mkcpustr
23191 mkdep
23192 +mkpiggy
23193 mkprep
23194 +mkregtable
23195 mktables
23196 mktree
23197 modpost
23198 @@ -149,6 +166,7 @@ patches*
23199 pca200e.bin
23200 pca200e_ecd.bin2
23201 piggy.gz
23202 +piggy.S
23203 piggyback
23204 pnmtologo
23205 ppc_defs.h*
23206 @@ -157,12 +175,15 @@ qconf
23207 raid6altivec*.c
23208 raid6int*.c
23209 raid6tables.c
23210 +regdb.c
23211 relocs
23212 +rlim_names.h
23213 series
23214 setup
23215 setup.bin
23216 setup.elf
23217 sImage
23218 +slabinfo
23219 sm_tbl*
23220 split-include
23221 syscalltab.h
23222 @@ -186,14 +207,20 @@ version.h*
23223 vmlinux
23224 vmlinux-*
23225 vmlinux.aout
23226 +vmlinux.bin.all
23227 +vmlinux.bin.bz2
23228 vmlinux.lds
23229 +vmlinux.relocs
23230 +voffset.h
23231 vsyscall.lds
23232 vsyscall_32.lds
23233 wanxlfw.inc
23234 uImage
23235 unifdef
23236 +utsrelease.h
23237 wakeup.bin
23238 wakeup.elf
23239 wakeup.lds
23240 zImage*
23241 zconf.hash.c
23242 +zoffset.h
23243 diff -urNp linux-2.6.32.42/Documentation/kernel-parameters.txt linux-2.6.32.42/Documentation/kernel-parameters.txt
23244 --- linux-2.6.32.42/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
23245 +++ linux-2.6.32.42/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
23246 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
23247 the specified number of seconds. This is to be used if
23248 your oopses keep scrolling off the screen.
23249
23250 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
23251 + virtualization environments that don't cope well with the
23252 + expand down segment used by UDEREF on X86-32 or the frequent
23253 + page table updates on X86-64.
23254 +
23255 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
23256 +
23257 pcbit= [HW,ISDN]
23258
23259 pcd. [PARIDE]
23260 diff -urNp linux-2.6.32.42/drivers/acpi/acpi_pad.c linux-2.6.32.42/drivers/acpi/acpi_pad.c
23261 --- linux-2.6.32.42/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
23262 +++ linux-2.6.32.42/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
23263 @@ -30,7 +30,7 @@
23264 #include <acpi/acpi_bus.h>
23265 #include <acpi/acpi_drivers.h>
23266
23267 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
23268 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
23269 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
23270 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
23271 static DEFINE_MUTEX(isolated_cpus_lock);
23272 diff -urNp linux-2.6.32.42/drivers/acpi/battery.c linux-2.6.32.42/drivers/acpi/battery.c
23273 --- linux-2.6.32.42/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
23274 +++ linux-2.6.32.42/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
23275 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
23276 }
23277
23278 static struct battery_file {
23279 - struct file_operations ops;
23280 + const struct file_operations ops;
23281 mode_t mode;
23282 const char *name;
23283 } acpi_battery_file[] = {
23284 diff -urNp linux-2.6.32.42/drivers/acpi/dock.c linux-2.6.32.42/drivers/acpi/dock.c
23285 --- linux-2.6.32.42/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
23286 +++ linux-2.6.32.42/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
23287 @@ -77,7 +77,7 @@ struct dock_dependent_device {
23288 struct list_head list;
23289 struct list_head hotplug_list;
23290 acpi_handle handle;
23291 - struct acpi_dock_ops *ops;
23292 + const struct acpi_dock_ops *ops;
23293 void *context;
23294 };
23295
23296 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
23297 * the dock driver after _DCK is executed.
23298 */
23299 int
23300 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
23301 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
23302 void *context)
23303 {
23304 struct dock_dependent_device *dd;
23305 diff -urNp linux-2.6.32.42/drivers/acpi/osl.c linux-2.6.32.42/drivers/acpi/osl.c
23306 --- linux-2.6.32.42/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
23307 +++ linux-2.6.32.42/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
23308 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
23309 void __iomem *virt_addr;
23310
23311 virt_addr = ioremap(phys_addr, width);
23312 + if (!virt_addr)
23313 + return AE_NO_MEMORY;
23314 if (!value)
23315 value = &dummy;
23316
23317 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
23318 void __iomem *virt_addr;
23319
23320 virt_addr = ioremap(phys_addr, width);
23321 + if (!virt_addr)
23322 + return AE_NO_MEMORY;
23323
23324 switch (width) {
23325 case 8:
23326 diff -urNp linux-2.6.32.42/drivers/acpi/power_meter.c linux-2.6.32.42/drivers/acpi/power_meter.c
23327 --- linux-2.6.32.42/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
23328 +++ linux-2.6.32.42/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
23329 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
23330 return res;
23331
23332 temp /= 1000;
23333 - if (temp < 0)
23334 - return -EINVAL;
23335
23336 mutex_lock(&resource->lock);
23337 resource->trip[attr->index - 7] = temp;
23338 diff -urNp linux-2.6.32.42/drivers/acpi/proc.c linux-2.6.32.42/drivers/acpi/proc.c
23339 --- linux-2.6.32.42/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
23340 +++ linux-2.6.32.42/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
23341 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
23342 size_t count, loff_t * ppos)
23343 {
23344 struct list_head *node, *next;
23345 - char strbuf[5];
23346 - char str[5] = "";
23347 - unsigned int len = count;
23348 + char strbuf[5] = {0};
23349 struct acpi_device *found_dev = NULL;
23350
23351 - if (len > 4)
23352 - len = 4;
23353 - if (len < 0)
23354 - return -EFAULT;
23355 + if (count > 4)
23356 + count = 4;
23357
23358 - if (copy_from_user(strbuf, buffer, len))
23359 + if (copy_from_user(strbuf, buffer, count))
23360 return -EFAULT;
23361 - strbuf[len] = '\0';
23362 - sscanf(strbuf, "%s", str);
23363 + strbuf[count] = '\0';
23364
23365 mutex_lock(&acpi_device_lock);
23366 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23367 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
23368 if (!dev->wakeup.flags.valid)
23369 continue;
23370
23371 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
23372 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23373 dev->wakeup.state.enabled =
23374 dev->wakeup.state.enabled ? 0 : 1;
23375 found_dev = dev;
23376 diff -urNp linux-2.6.32.42/drivers/acpi/processor_core.c linux-2.6.32.42/drivers/acpi/processor_core.c
23377 --- linux-2.6.32.42/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
23378 +++ linux-2.6.32.42/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
23379 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
23380 return 0;
23381 }
23382
23383 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23384 + BUG_ON(pr->id >= nr_cpu_ids);
23385
23386 /*
23387 * Buggy BIOS check
23388 diff -urNp linux-2.6.32.42/drivers/acpi/sbshc.c linux-2.6.32.42/drivers/acpi/sbshc.c
23389 --- linux-2.6.32.42/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
23390 +++ linux-2.6.32.42/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
23391 @@ -17,7 +17,7 @@
23392
23393 #define PREFIX "ACPI: "
23394
23395 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
23396 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
23397 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
23398
23399 struct acpi_smb_hc {
23400 diff -urNp linux-2.6.32.42/drivers/acpi/sleep.c linux-2.6.32.42/drivers/acpi/sleep.c
23401 --- linux-2.6.32.42/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
23402 +++ linux-2.6.32.42/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
23403 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
23404 }
23405 }
23406
23407 -static struct platform_suspend_ops acpi_suspend_ops = {
23408 +static const struct platform_suspend_ops acpi_suspend_ops = {
23409 .valid = acpi_suspend_state_valid,
23410 .begin = acpi_suspend_begin,
23411 .prepare_late = acpi_pm_prepare,
23412 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
23413 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
23414 * been requested.
23415 */
23416 -static struct platform_suspend_ops acpi_suspend_ops_old = {
23417 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
23418 .valid = acpi_suspend_state_valid,
23419 .begin = acpi_suspend_begin_old,
23420 .prepare_late = acpi_pm_disable_gpes,
23421 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
23422 acpi_enable_all_runtime_gpes();
23423 }
23424
23425 -static struct platform_hibernation_ops acpi_hibernation_ops = {
23426 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
23427 .begin = acpi_hibernation_begin,
23428 .end = acpi_pm_end,
23429 .pre_snapshot = acpi_hibernation_pre_snapshot,
23430 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
23431 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
23432 * been requested.
23433 */
23434 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
23435 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
23436 .begin = acpi_hibernation_begin_old,
23437 .end = acpi_pm_end,
23438 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
23439 diff -urNp linux-2.6.32.42/drivers/acpi/video.c linux-2.6.32.42/drivers/acpi/video.c
23440 --- linux-2.6.32.42/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
23441 +++ linux-2.6.32.42/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
23442 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
23443 vd->brightness->levels[request_level]);
23444 }
23445
23446 -static struct backlight_ops acpi_backlight_ops = {
23447 +static const struct backlight_ops acpi_backlight_ops = {
23448 .get_brightness = acpi_video_get_brightness,
23449 .update_status = acpi_video_set_brightness,
23450 };
23451 diff -urNp linux-2.6.32.42/drivers/ata/ahci.c linux-2.6.32.42/drivers/ata/ahci.c
23452 --- linux-2.6.32.42/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
23453 +++ linux-2.6.32.42/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
23454 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
23455 .sdev_attrs = ahci_sdev_attrs,
23456 };
23457
23458 -static struct ata_port_operations ahci_ops = {
23459 +static const struct ata_port_operations ahci_ops = {
23460 .inherits = &sata_pmp_port_ops,
23461
23462 .qc_defer = sata_pmp_qc_defer_cmd_switch,
23463 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
23464 .port_stop = ahci_port_stop,
23465 };
23466
23467 -static struct ata_port_operations ahci_vt8251_ops = {
23468 +static const struct ata_port_operations ahci_vt8251_ops = {
23469 .inherits = &ahci_ops,
23470 .hardreset = ahci_vt8251_hardreset,
23471 };
23472
23473 -static struct ata_port_operations ahci_p5wdh_ops = {
23474 +static const struct ata_port_operations ahci_p5wdh_ops = {
23475 .inherits = &ahci_ops,
23476 .hardreset = ahci_p5wdh_hardreset,
23477 };
23478
23479 -static struct ata_port_operations ahci_sb600_ops = {
23480 +static const struct ata_port_operations ahci_sb600_ops = {
23481 .inherits = &ahci_ops,
23482 .softreset = ahci_sb600_softreset,
23483 .pmp_softreset = ahci_sb600_softreset,
23484 diff -urNp linux-2.6.32.42/drivers/ata/ata_generic.c linux-2.6.32.42/drivers/ata/ata_generic.c
23485 --- linux-2.6.32.42/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
23486 +++ linux-2.6.32.42/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
23487 @@ -104,7 +104,7 @@ static struct scsi_host_template generic
23488 ATA_BMDMA_SHT(DRV_NAME),
23489 };
23490
23491 -static struct ata_port_operations generic_port_ops = {
23492 +static const struct ata_port_operations generic_port_ops = {
23493 .inherits = &ata_bmdma_port_ops,
23494 .cable_detect = ata_cable_unknown,
23495 .set_mode = generic_set_mode,
23496 diff -urNp linux-2.6.32.42/drivers/ata/ata_piix.c linux-2.6.32.42/drivers/ata/ata_piix.c
23497 --- linux-2.6.32.42/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
23498 +++ linux-2.6.32.42/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
23499 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
23500 ATA_BMDMA_SHT(DRV_NAME),
23501 };
23502
23503 -static struct ata_port_operations piix_pata_ops = {
23504 +static const struct ata_port_operations piix_pata_ops = {
23505 .inherits = &ata_bmdma32_port_ops,
23506 .cable_detect = ata_cable_40wire,
23507 .set_piomode = piix_set_piomode,
23508 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
23509 .prereset = piix_pata_prereset,
23510 };
23511
23512 -static struct ata_port_operations piix_vmw_ops = {
23513 +static const struct ata_port_operations piix_vmw_ops = {
23514 .inherits = &piix_pata_ops,
23515 .bmdma_status = piix_vmw_bmdma_status,
23516 };
23517
23518 -static struct ata_port_operations ich_pata_ops = {
23519 +static const struct ata_port_operations ich_pata_ops = {
23520 .inherits = &piix_pata_ops,
23521 .cable_detect = ich_pata_cable_detect,
23522 .set_dmamode = ich_set_dmamode,
23523 };
23524
23525 -static struct ata_port_operations piix_sata_ops = {
23526 +static const struct ata_port_operations piix_sata_ops = {
23527 .inherits = &ata_bmdma_port_ops,
23528 };
23529
23530 -static struct ata_port_operations piix_sidpr_sata_ops = {
23531 +static const struct ata_port_operations piix_sidpr_sata_ops = {
23532 .inherits = &piix_sata_ops,
23533 .hardreset = sata_std_hardreset,
23534 .scr_read = piix_sidpr_scr_read,
23535 diff -urNp linux-2.6.32.42/drivers/ata/libata-acpi.c linux-2.6.32.42/drivers/ata/libata-acpi.c
23536 --- linux-2.6.32.42/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
23537 +++ linux-2.6.32.42/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
23538 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
23539 ata_acpi_uevent(dev->link->ap, dev, event);
23540 }
23541
23542 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
23543 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
23544 .handler = ata_acpi_dev_notify_dock,
23545 .uevent = ata_acpi_dev_uevent,
23546 };
23547
23548 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
23549 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
23550 .handler = ata_acpi_ap_notify_dock,
23551 .uevent = ata_acpi_ap_uevent,
23552 };
23553 diff -urNp linux-2.6.32.42/drivers/ata/libata-core.c linux-2.6.32.42/drivers/ata/libata-core.c
23554 --- linux-2.6.32.42/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
23555 +++ linux-2.6.32.42/drivers/ata/libata-core.c 2011-04-23 12:56:10.000000000 -0400
23556 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
23557 struct ata_port *ap;
23558 unsigned int tag;
23559
23560 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23561 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23562 ap = qc->ap;
23563
23564 qc->flags = 0;
23565 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
23566 struct ata_port *ap;
23567 struct ata_link *link;
23568
23569 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23570 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23571 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
23572 ap = qc->ap;
23573 link = qc->dev->link;
23574 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
23575 * LOCKING:
23576 * None.
23577 */
23578 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
23579 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
23580 {
23581 static DEFINE_SPINLOCK(lock);
23582 const struct ata_port_operations *cur;
23583 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
23584 return;
23585
23586 spin_lock(&lock);
23587 + pax_open_kernel();
23588
23589 for (cur = ops->inherits; cur; cur = cur->inherits) {
23590 void **inherit = (void **)cur;
23591 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
23592 if (IS_ERR(*pp))
23593 *pp = NULL;
23594
23595 - ops->inherits = NULL;
23596 + ((struct ata_port_operations *)ops)->inherits = NULL;
23597
23598 + pax_close_kernel();
23599 spin_unlock(&lock);
23600 }
23601
23602 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
23603 */
23604 /* KILLME - the only user left is ipr */
23605 void ata_host_init(struct ata_host *host, struct device *dev,
23606 - unsigned long flags, struct ata_port_operations *ops)
23607 + unsigned long flags, const struct ata_port_operations *ops)
23608 {
23609 spin_lock_init(&host->lock);
23610 host->dev = dev;
23611 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
23612 /* truly dummy */
23613 }
23614
23615 -struct ata_port_operations ata_dummy_port_ops = {
23616 +const struct ata_port_operations ata_dummy_port_ops = {
23617 .qc_prep = ata_noop_qc_prep,
23618 .qc_issue = ata_dummy_qc_issue,
23619 .error_handler = ata_dummy_error_handler,
23620 diff -urNp linux-2.6.32.42/drivers/ata/libata-eh.c linux-2.6.32.42/drivers/ata/libata-eh.c
23621 --- linux-2.6.32.42/drivers/ata/libata-eh.c 2011-03-27 14:31:47.000000000 -0400
23622 +++ linux-2.6.32.42/drivers/ata/libata-eh.c 2011-05-16 21:46:57.000000000 -0400
23623 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
23624 {
23625 struct ata_link *link;
23626
23627 + pax_track_stack();
23628 +
23629 ata_for_each_link(link, ap, HOST_FIRST)
23630 ata_eh_link_report(link);
23631 }
23632 @@ -3590,7 +3592,7 @@ void ata_do_eh(struct ata_port *ap, ata_
23633 */
23634 void ata_std_error_handler(struct ata_port *ap)
23635 {
23636 - struct ata_port_operations *ops = ap->ops;
23637 + const struct ata_port_operations *ops = ap->ops;
23638 ata_reset_fn_t hardreset = ops->hardreset;
23639
23640 /* ignore built-in hardreset if SCR access is not available */
23641 diff -urNp linux-2.6.32.42/drivers/ata/libata-pmp.c linux-2.6.32.42/drivers/ata/libata-pmp.c
23642 --- linux-2.6.32.42/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
23643 +++ linux-2.6.32.42/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
23644 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
23645 */
23646 static int sata_pmp_eh_recover(struct ata_port *ap)
23647 {
23648 - struct ata_port_operations *ops = ap->ops;
23649 + const struct ata_port_operations *ops = ap->ops;
23650 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
23651 struct ata_link *pmp_link = &ap->link;
23652 struct ata_device *pmp_dev = pmp_link->device;
23653 diff -urNp linux-2.6.32.42/drivers/ata/pata_acpi.c linux-2.6.32.42/drivers/ata/pata_acpi.c
23654 --- linux-2.6.32.42/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
23655 +++ linux-2.6.32.42/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
23656 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
23657 ATA_BMDMA_SHT(DRV_NAME),
23658 };
23659
23660 -static struct ata_port_operations pacpi_ops = {
23661 +static const struct ata_port_operations pacpi_ops = {
23662 .inherits = &ata_bmdma_port_ops,
23663 .qc_issue = pacpi_qc_issue,
23664 .cable_detect = pacpi_cable_detect,
23665 diff -urNp linux-2.6.32.42/drivers/ata/pata_ali.c linux-2.6.32.42/drivers/ata/pata_ali.c
23666 --- linux-2.6.32.42/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
23667 +++ linux-2.6.32.42/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
23668 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
23669 * Port operations for PIO only ALi
23670 */
23671
23672 -static struct ata_port_operations ali_early_port_ops = {
23673 +static const struct ata_port_operations ali_early_port_ops = {
23674 .inherits = &ata_sff_port_ops,
23675 .cable_detect = ata_cable_40wire,
23676 .set_piomode = ali_set_piomode,
23677 @@ -382,7 +382,7 @@ static const struct ata_port_operations
23678 * Port operations for DMA capable ALi without cable
23679 * detect
23680 */
23681 -static struct ata_port_operations ali_20_port_ops = {
23682 +static const struct ata_port_operations ali_20_port_ops = {
23683 .inherits = &ali_dma_base_ops,
23684 .cable_detect = ata_cable_40wire,
23685 .mode_filter = ali_20_filter,
23686 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
23687 /*
23688 * Port operations for DMA capable ALi with cable detect
23689 */
23690 -static struct ata_port_operations ali_c2_port_ops = {
23691 +static const struct ata_port_operations ali_c2_port_ops = {
23692 .inherits = &ali_dma_base_ops,
23693 .check_atapi_dma = ali_check_atapi_dma,
23694 .cable_detect = ali_c2_cable_detect,
23695 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
23696 /*
23697 * Port operations for DMA capable ALi with cable detect
23698 */
23699 -static struct ata_port_operations ali_c4_port_ops = {
23700 +static const struct ata_port_operations ali_c4_port_ops = {
23701 .inherits = &ali_dma_base_ops,
23702 .check_atapi_dma = ali_check_atapi_dma,
23703 .cable_detect = ali_c2_cable_detect,
23704 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
23705 /*
23706 * Port operations for DMA capable ALi with cable detect and LBA48
23707 */
23708 -static struct ata_port_operations ali_c5_port_ops = {
23709 +static const struct ata_port_operations ali_c5_port_ops = {
23710 .inherits = &ali_dma_base_ops,
23711 .check_atapi_dma = ali_check_atapi_dma,
23712 .dev_config = ali_warn_atapi_dma,
23713 diff -urNp linux-2.6.32.42/drivers/ata/pata_amd.c linux-2.6.32.42/drivers/ata/pata_amd.c
23714 --- linux-2.6.32.42/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
23715 +++ linux-2.6.32.42/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
23716 @@ -397,28 +397,28 @@ static const struct ata_port_operations
23717 .prereset = amd_pre_reset,
23718 };
23719
23720 -static struct ata_port_operations amd33_port_ops = {
23721 +static const struct ata_port_operations amd33_port_ops = {
23722 .inherits = &amd_base_port_ops,
23723 .cable_detect = ata_cable_40wire,
23724 .set_piomode = amd33_set_piomode,
23725 .set_dmamode = amd33_set_dmamode,
23726 };
23727
23728 -static struct ata_port_operations amd66_port_ops = {
23729 +static const struct ata_port_operations amd66_port_ops = {
23730 .inherits = &amd_base_port_ops,
23731 .cable_detect = ata_cable_unknown,
23732 .set_piomode = amd66_set_piomode,
23733 .set_dmamode = amd66_set_dmamode,
23734 };
23735
23736 -static struct ata_port_operations amd100_port_ops = {
23737 +static const struct ata_port_operations amd100_port_ops = {
23738 .inherits = &amd_base_port_ops,
23739 .cable_detect = ata_cable_unknown,
23740 .set_piomode = amd100_set_piomode,
23741 .set_dmamode = amd100_set_dmamode,
23742 };
23743
23744 -static struct ata_port_operations amd133_port_ops = {
23745 +static const struct ata_port_operations amd133_port_ops = {
23746 .inherits = &amd_base_port_ops,
23747 .cable_detect = amd_cable_detect,
23748 .set_piomode = amd133_set_piomode,
23749 @@ -433,13 +433,13 @@ static const struct ata_port_operations
23750 .host_stop = nv_host_stop,
23751 };
23752
23753 -static struct ata_port_operations nv100_port_ops = {
23754 +static const struct ata_port_operations nv100_port_ops = {
23755 .inherits = &nv_base_port_ops,
23756 .set_piomode = nv100_set_piomode,
23757 .set_dmamode = nv100_set_dmamode,
23758 };
23759
23760 -static struct ata_port_operations nv133_port_ops = {
23761 +static const struct ata_port_operations nv133_port_ops = {
23762 .inherits = &nv_base_port_ops,
23763 .set_piomode = nv133_set_piomode,
23764 .set_dmamode = nv133_set_dmamode,
23765 diff -urNp linux-2.6.32.42/drivers/ata/pata_artop.c linux-2.6.32.42/drivers/ata/pata_artop.c
23766 --- linux-2.6.32.42/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
23767 +++ linux-2.6.32.42/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
23768 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
23769 ATA_BMDMA_SHT(DRV_NAME),
23770 };
23771
23772 -static struct ata_port_operations artop6210_ops = {
23773 +static const struct ata_port_operations artop6210_ops = {
23774 .inherits = &ata_bmdma_port_ops,
23775 .cable_detect = ata_cable_40wire,
23776 .set_piomode = artop6210_set_piomode,
23777 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6
23778 .qc_defer = artop6210_qc_defer,
23779 };
23780
23781 -static struct ata_port_operations artop6260_ops = {
23782 +static const struct ata_port_operations artop6260_ops = {
23783 .inherits = &ata_bmdma_port_ops,
23784 .cable_detect = artop6260_cable_detect,
23785 .set_piomode = artop6260_set_piomode,
23786 diff -urNp linux-2.6.32.42/drivers/ata/pata_at32.c linux-2.6.32.42/drivers/ata/pata_at32.c
23787 --- linux-2.6.32.42/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
23788 +++ linux-2.6.32.42/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
23789 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
23790 ATA_PIO_SHT(DRV_NAME),
23791 };
23792
23793 -static struct ata_port_operations at32_port_ops = {
23794 +static const struct ata_port_operations at32_port_ops = {
23795 .inherits = &ata_sff_port_ops,
23796 .cable_detect = ata_cable_40wire,
23797 .set_piomode = pata_at32_set_piomode,
23798 diff -urNp linux-2.6.32.42/drivers/ata/pata_at91.c linux-2.6.32.42/drivers/ata/pata_at91.c
23799 --- linux-2.6.32.42/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
23800 +++ linux-2.6.32.42/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
23801 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
23802 ATA_PIO_SHT(DRV_NAME),
23803 };
23804
23805 -static struct ata_port_operations pata_at91_port_ops = {
23806 +static const struct ata_port_operations pata_at91_port_ops = {
23807 .inherits = &ata_sff_port_ops,
23808
23809 .sff_data_xfer = pata_at91_data_xfer_noirq,
23810 diff -urNp linux-2.6.32.42/drivers/ata/pata_atiixp.c linux-2.6.32.42/drivers/ata/pata_atiixp.c
23811 --- linux-2.6.32.42/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
23812 +++ linux-2.6.32.42/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
23813 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
23814 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23815 };
23816
23817 -static struct ata_port_operations atiixp_port_ops = {
23818 +static const struct ata_port_operations atiixp_port_ops = {
23819 .inherits = &ata_bmdma_port_ops,
23820
23821 .qc_prep = ata_sff_dumb_qc_prep,
23822 diff -urNp linux-2.6.32.42/drivers/ata/pata_atp867x.c linux-2.6.32.42/drivers/ata/pata_atp867x.c
23823 --- linux-2.6.32.42/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
23824 +++ linux-2.6.32.42/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
23825 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
23826 ATA_BMDMA_SHT(DRV_NAME),
23827 };
23828
23829 -static struct ata_port_operations atp867x_ops = {
23830 +static const struct ata_port_operations atp867x_ops = {
23831 .inherits = &ata_bmdma_port_ops,
23832 .cable_detect = atp867x_cable_detect,
23833 .set_piomode = atp867x_set_piomode,
23834 diff -urNp linux-2.6.32.42/drivers/ata/pata_bf54x.c linux-2.6.32.42/drivers/ata/pata_bf54x.c
23835 --- linux-2.6.32.42/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
23836 +++ linux-2.6.32.42/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
23837 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
23838 .dma_boundary = ATA_DMA_BOUNDARY,
23839 };
23840
23841 -static struct ata_port_operations bfin_pata_ops = {
23842 +static const struct ata_port_operations bfin_pata_ops = {
23843 .inherits = &ata_sff_port_ops,
23844
23845 .set_piomode = bfin_set_piomode,
23846 diff -urNp linux-2.6.32.42/drivers/ata/pata_cmd640.c linux-2.6.32.42/drivers/ata/pata_cmd640.c
23847 --- linux-2.6.32.42/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
23848 +++ linux-2.6.32.42/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
23849 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
23850 ATA_BMDMA_SHT(DRV_NAME),
23851 };
23852
23853 -static struct ata_port_operations cmd640_port_ops = {
23854 +static const struct ata_port_operations cmd640_port_ops = {
23855 .inherits = &ata_bmdma_port_ops,
23856 /* In theory xfer_noirq is not needed once we kill the prefetcher */
23857 .sff_data_xfer = ata_sff_data_xfer_noirq,
23858 diff -urNp linux-2.6.32.42/drivers/ata/pata_cmd64x.c linux-2.6.32.42/drivers/ata/pata_cmd64x.c
23859 --- linux-2.6.32.42/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
23860 +++ linux-2.6.32.42/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
23861 @@ -271,18 +271,18 @@ static const struct ata_port_operations
23862 .set_dmamode = cmd64x_set_dmamode,
23863 };
23864
23865 -static struct ata_port_operations cmd64x_port_ops = {
23866 +static const struct ata_port_operations cmd64x_port_ops = {
23867 .inherits = &cmd64x_base_ops,
23868 .cable_detect = ata_cable_40wire,
23869 };
23870
23871 -static struct ata_port_operations cmd646r1_port_ops = {
23872 +static const struct ata_port_operations cmd646r1_port_ops = {
23873 .inherits = &cmd64x_base_ops,
23874 .bmdma_stop = cmd646r1_bmdma_stop,
23875 .cable_detect = ata_cable_40wire,
23876 };
23877
23878 -static struct ata_port_operations cmd648_port_ops = {
23879 +static const struct ata_port_operations cmd648_port_ops = {
23880 .inherits = &cmd64x_base_ops,
23881 .bmdma_stop = cmd648_bmdma_stop,
23882 .cable_detect = cmd648_cable_detect,
23883 diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5520.c linux-2.6.32.42/drivers/ata/pata_cs5520.c
23884 --- linux-2.6.32.42/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
23885 +++ linux-2.6.32.42/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
23886 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
23887 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23888 };
23889
23890 -static struct ata_port_operations cs5520_port_ops = {
23891 +static const struct ata_port_operations cs5520_port_ops = {
23892 .inherits = &ata_bmdma_port_ops,
23893 .qc_prep = ata_sff_dumb_qc_prep,
23894 .cable_detect = ata_cable_40wire,
23895 diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5530.c linux-2.6.32.42/drivers/ata/pata_cs5530.c
23896 --- linux-2.6.32.42/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
23897 +++ linux-2.6.32.42/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
23898 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
23899 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23900 };
23901
23902 -static struct ata_port_operations cs5530_port_ops = {
23903 +static const struct ata_port_operations cs5530_port_ops = {
23904 .inherits = &ata_bmdma_port_ops,
23905
23906 .qc_prep = ata_sff_dumb_qc_prep,
23907 diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5535.c linux-2.6.32.42/drivers/ata/pata_cs5535.c
23908 --- linux-2.6.32.42/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
23909 +++ linux-2.6.32.42/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
23910 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
23911 ATA_BMDMA_SHT(DRV_NAME),
23912 };
23913
23914 -static struct ata_port_operations cs5535_port_ops = {
23915 +static const struct ata_port_operations cs5535_port_ops = {
23916 .inherits = &ata_bmdma_port_ops,
23917 .cable_detect = cs5535_cable_detect,
23918 .set_piomode = cs5535_set_piomode,
23919 diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5536.c linux-2.6.32.42/drivers/ata/pata_cs5536.c
23920 --- linux-2.6.32.42/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
23921 +++ linux-2.6.32.42/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
23922 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
23923 ATA_BMDMA_SHT(DRV_NAME),
23924 };
23925
23926 -static struct ata_port_operations cs5536_port_ops = {
23927 +static const struct ata_port_operations cs5536_port_ops = {
23928 .inherits = &ata_bmdma_port_ops,
23929 .cable_detect = cs5536_cable_detect,
23930 .set_piomode = cs5536_set_piomode,
23931 diff -urNp linux-2.6.32.42/drivers/ata/pata_cypress.c linux-2.6.32.42/drivers/ata/pata_cypress.c
23932 --- linux-2.6.32.42/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
23933 +++ linux-2.6.32.42/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
23934 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
23935 ATA_BMDMA_SHT(DRV_NAME),
23936 };
23937
23938 -static struct ata_port_operations cy82c693_port_ops = {
23939 +static const struct ata_port_operations cy82c693_port_ops = {
23940 .inherits = &ata_bmdma_port_ops,
23941 .cable_detect = ata_cable_40wire,
23942 .set_piomode = cy82c693_set_piomode,
23943 diff -urNp linux-2.6.32.42/drivers/ata/pata_efar.c linux-2.6.32.42/drivers/ata/pata_efar.c
23944 --- linux-2.6.32.42/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
23945 +++ linux-2.6.32.42/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
23946 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
23947 ATA_BMDMA_SHT(DRV_NAME),
23948 };
23949
23950 -static struct ata_port_operations efar_ops = {
23951 +static const struct ata_port_operations efar_ops = {
23952 .inherits = &ata_bmdma_port_ops,
23953 .cable_detect = efar_cable_detect,
23954 .set_piomode = efar_set_piomode,
23955 diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt366.c linux-2.6.32.42/drivers/ata/pata_hpt366.c
23956 --- linux-2.6.32.42/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
23957 +++ linux-2.6.32.42/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
23958 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
23959 * Configuration for HPT366/68
23960 */
23961
23962 -static struct ata_port_operations hpt366_port_ops = {
23963 +static const struct ata_port_operations hpt366_port_ops = {
23964 .inherits = &ata_bmdma_port_ops,
23965 .cable_detect = hpt36x_cable_detect,
23966 .mode_filter = hpt366_filter,
23967 diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt37x.c linux-2.6.32.42/drivers/ata/pata_hpt37x.c
23968 --- linux-2.6.32.42/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
23969 +++ linux-2.6.32.42/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
23970 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
23971 * Configuration for HPT370
23972 */
23973
23974 -static struct ata_port_operations hpt370_port_ops = {
23975 +static const struct ata_port_operations hpt370_port_ops = {
23976 .inherits = &ata_bmdma_port_ops,
23977
23978 .bmdma_stop = hpt370_bmdma_stop,
23979 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
23980 * Configuration for HPT370A. Close to 370 but less filters
23981 */
23982
23983 -static struct ata_port_operations hpt370a_port_ops = {
23984 +static const struct ata_port_operations hpt370a_port_ops = {
23985 .inherits = &hpt370_port_ops,
23986 .mode_filter = hpt370a_filter,
23987 };
23988 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
23989 * and DMA mode setting functionality.
23990 */
23991
23992 -static struct ata_port_operations hpt372_port_ops = {
23993 +static const struct ata_port_operations hpt372_port_ops = {
23994 .inherits = &ata_bmdma_port_ops,
23995
23996 .bmdma_stop = hpt37x_bmdma_stop,
23997 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
23998 * but we have a different cable detection procedure for function 1.
23999 */
24000
24001 -static struct ata_port_operations hpt374_fn1_port_ops = {
24002 +static const struct ata_port_operations hpt374_fn1_port_ops = {
24003 .inherits = &hpt372_port_ops,
24004 .prereset = hpt374_fn1_pre_reset,
24005 };
24006 diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c
24007 --- linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
24008 +++ linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
24009 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
24010 * Configuration for HPT3x2n.
24011 */
24012
24013 -static struct ata_port_operations hpt3x2n_port_ops = {
24014 +static const struct ata_port_operations hpt3x2n_port_ops = {
24015 .inherits = &ata_bmdma_port_ops,
24016
24017 .bmdma_stop = hpt3x2n_bmdma_stop,
24018 diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt3x3.c linux-2.6.32.42/drivers/ata/pata_hpt3x3.c
24019 --- linux-2.6.32.42/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
24020 +++ linux-2.6.32.42/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
24021 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
24022 ATA_BMDMA_SHT(DRV_NAME),
24023 };
24024
24025 -static struct ata_port_operations hpt3x3_port_ops = {
24026 +static const struct ata_port_operations hpt3x3_port_ops = {
24027 .inherits = &ata_bmdma_port_ops,
24028 .cable_detect = ata_cable_40wire,
24029 .set_piomode = hpt3x3_set_piomode,
24030 diff -urNp linux-2.6.32.42/drivers/ata/pata_icside.c linux-2.6.32.42/drivers/ata/pata_icside.c
24031 --- linux-2.6.32.42/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
24032 +++ linux-2.6.32.42/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
24033 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
24034 }
24035 }
24036
24037 -static struct ata_port_operations pata_icside_port_ops = {
24038 +static const struct ata_port_operations pata_icside_port_ops = {
24039 .inherits = &ata_sff_port_ops,
24040 /* no need to build any PRD tables for DMA */
24041 .qc_prep = ata_noop_qc_prep,
24042 diff -urNp linux-2.6.32.42/drivers/ata/pata_isapnp.c linux-2.6.32.42/drivers/ata/pata_isapnp.c
24043 --- linux-2.6.32.42/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
24044 +++ linux-2.6.32.42/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
24045 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
24046 ATA_PIO_SHT(DRV_NAME),
24047 };
24048
24049 -static struct ata_port_operations isapnp_port_ops = {
24050 +static const struct ata_port_operations isapnp_port_ops = {
24051 .inherits = &ata_sff_port_ops,
24052 .cable_detect = ata_cable_40wire,
24053 };
24054
24055 -static struct ata_port_operations isapnp_noalt_port_ops = {
24056 +static const struct ata_port_operations isapnp_noalt_port_ops = {
24057 .inherits = &ata_sff_port_ops,
24058 .cable_detect = ata_cable_40wire,
24059 /* No altstatus so we don't want to use the lost interrupt poll */
24060 diff -urNp linux-2.6.32.42/drivers/ata/pata_it8213.c linux-2.6.32.42/drivers/ata/pata_it8213.c
24061 --- linux-2.6.32.42/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
24062 +++ linux-2.6.32.42/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
24063 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
24064 };
24065
24066
24067 -static struct ata_port_operations it8213_ops = {
24068 +static const struct ata_port_operations it8213_ops = {
24069 .inherits = &ata_bmdma_port_ops,
24070 .cable_detect = it8213_cable_detect,
24071 .set_piomode = it8213_set_piomode,
24072 diff -urNp linux-2.6.32.42/drivers/ata/pata_it821x.c linux-2.6.32.42/drivers/ata/pata_it821x.c
24073 --- linux-2.6.32.42/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
24074 +++ linux-2.6.32.42/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
24075 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
24076 ATA_BMDMA_SHT(DRV_NAME),
24077 };
24078
24079 -static struct ata_port_operations it821x_smart_port_ops = {
24080 +static const struct ata_port_operations it821x_smart_port_ops = {
24081 .inherits = &ata_bmdma_port_ops,
24082
24083 .check_atapi_dma= it821x_check_atapi_dma,
24084 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x
24085 .port_start = it821x_port_start,
24086 };
24087
24088 -static struct ata_port_operations it821x_passthru_port_ops = {
24089 +static const struct ata_port_operations it821x_passthru_port_ops = {
24090 .inherits = &ata_bmdma_port_ops,
24091
24092 .check_atapi_dma= it821x_check_atapi_dma,
24093 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x
24094 .port_start = it821x_port_start,
24095 };
24096
24097 -static struct ata_port_operations it821x_rdc_port_ops = {
24098 +static const struct ata_port_operations it821x_rdc_port_ops = {
24099 .inherits = &ata_bmdma_port_ops,
24100
24101 .check_atapi_dma= it821x_check_atapi_dma,
24102 diff -urNp linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c
24103 --- linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
24104 +++ linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
24105 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
24106 ATA_PIO_SHT(DRV_NAME),
24107 };
24108
24109 -static struct ata_port_operations ixp4xx_port_ops = {
24110 +static const struct ata_port_operations ixp4xx_port_ops = {
24111 .inherits = &ata_sff_port_ops,
24112 .sff_data_xfer = ixp4xx_mmio_data_xfer,
24113 .cable_detect = ata_cable_40wire,
24114 diff -urNp linux-2.6.32.42/drivers/ata/pata_jmicron.c linux-2.6.32.42/drivers/ata/pata_jmicron.c
24115 --- linux-2.6.32.42/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
24116 +++ linux-2.6.32.42/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
24117 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
24118 ATA_BMDMA_SHT(DRV_NAME),
24119 };
24120
24121 -static struct ata_port_operations jmicron_ops = {
24122 +static const struct ata_port_operations jmicron_ops = {
24123 .inherits = &ata_bmdma_port_ops,
24124 .prereset = jmicron_pre_reset,
24125 };
24126 diff -urNp linux-2.6.32.42/drivers/ata/pata_legacy.c linux-2.6.32.42/drivers/ata/pata_legacy.c
24127 --- linux-2.6.32.42/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
24128 +++ linux-2.6.32.42/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
24129 @@ -106,7 +106,7 @@ struct legacy_probe {
24130
24131 struct legacy_controller {
24132 const char *name;
24133 - struct ata_port_operations *ops;
24134 + const struct ata_port_operations *ops;
24135 unsigned int pio_mask;
24136 unsigned int flags;
24137 unsigned int pflags;
24138 @@ -223,12 +223,12 @@ static const struct ata_port_operations
24139 * pio_mask as well.
24140 */
24141
24142 -static struct ata_port_operations simple_port_ops = {
24143 +static const struct ata_port_operations simple_port_ops = {
24144 .inherits = &legacy_base_port_ops,
24145 .sff_data_xfer = ata_sff_data_xfer_noirq,
24146 };
24147
24148 -static struct ata_port_operations legacy_port_ops = {
24149 +static const struct ata_port_operations legacy_port_ops = {
24150 .inherits = &legacy_base_port_ops,
24151 .sff_data_xfer = ata_sff_data_xfer_noirq,
24152 .set_mode = legacy_set_mode,
24153 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
24154 return buflen;
24155 }
24156
24157 -static struct ata_port_operations pdc20230_port_ops = {
24158 +static const struct ata_port_operations pdc20230_port_ops = {
24159 .inherits = &legacy_base_port_ops,
24160 .set_piomode = pdc20230_set_piomode,
24161 .sff_data_xfer = pdc_data_xfer_vlb,
24162 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
24163 ioread8(ap->ioaddr.status_addr);
24164 }
24165
24166 -static struct ata_port_operations ht6560a_port_ops = {
24167 +static const struct ata_port_operations ht6560a_port_ops = {
24168 .inherits = &legacy_base_port_ops,
24169 .set_piomode = ht6560a_set_piomode,
24170 };
24171 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
24172 ioread8(ap->ioaddr.status_addr);
24173 }
24174
24175 -static struct ata_port_operations ht6560b_port_ops = {
24176 +static const struct ata_port_operations ht6560b_port_ops = {
24177 .inherits = &legacy_base_port_ops,
24178 .set_piomode = ht6560b_set_piomode,
24179 };
24180 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
24181 }
24182
24183
24184 -static struct ata_port_operations opti82c611a_port_ops = {
24185 +static const struct ata_port_operations opti82c611a_port_ops = {
24186 .inherits = &legacy_base_port_ops,
24187 .set_piomode = opti82c611a_set_piomode,
24188 };
24189 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
24190 return ata_sff_qc_issue(qc);
24191 }
24192
24193 -static struct ata_port_operations opti82c46x_port_ops = {
24194 +static const struct ata_port_operations opti82c46x_port_ops = {
24195 .inherits = &legacy_base_port_ops,
24196 .set_piomode = opti82c46x_set_piomode,
24197 .qc_issue = opti82c46x_qc_issue,
24198 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
24199 return 0;
24200 }
24201
24202 -static struct ata_port_operations qdi6500_port_ops = {
24203 +static const struct ata_port_operations qdi6500_port_ops = {
24204 .inherits = &legacy_base_port_ops,
24205 .set_piomode = qdi6500_set_piomode,
24206 .qc_issue = qdi_qc_issue,
24207 .sff_data_xfer = vlb32_data_xfer,
24208 };
24209
24210 -static struct ata_port_operations qdi6580_port_ops = {
24211 +static const struct ata_port_operations qdi6580_port_ops = {
24212 .inherits = &legacy_base_port_ops,
24213 .set_piomode = qdi6580_set_piomode,
24214 .sff_data_xfer = vlb32_data_xfer,
24215 };
24216
24217 -static struct ata_port_operations qdi6580dp_port_ops = {
24218 +static const struct ata_port_operations qdi6580dp_port_ops = {
24219 .inherits = &legacy_base_port_ops,
24220 .set_piomode = qdi6580dp_set_piomode,
24221 .sff_data_xfer = vlb32_data_xfer,
24222 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_
24223 return 0;
24224 }
24225
24226 -static struct ata_port_operations winbond_port_ops = {
24227 +static const struct ata_port_operations winbond_port_ops = {
24228 .inherits = &legacy_base_port_ops,
24229 .set_piomode = winbond_set_piomode,
24230 .sff_data_xfer = vlb32_data_xfer,
24231 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
24232 int pio_modes = controller->pio_mask;
24233 unsigned long io = probe->port;
24234 u32 mask = (1 << probe->slot);
24235 - struct ata_port_operations *ops = controller->ops;
24236 + const struct ata_port_operations *ops = controller->ops;
24237 struct legacy_data *ld = &legacy_data[probe->slot];
24238 struct ata_host *host = NULL;
24239 struct ata_port *ap;
24240 diff -urNp linux-2.6.32.42/drivers/ata/pata_marvell.c linux-2.6.32.42/drivers/ata/pata_marvell.c
24241 --- linux-2.6.32.42/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
24242 +++ linux-2.6.32.42/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
24243 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell
24244 ATA_BMDMA_SHT(DRV_NAME),
24245 };
24246
24247 -static struct ata_port_operations marvell_ops = {
24248 +static const struct ata_port_operations marvell_ops = {
24249 .inherits = &ata_bmdma_port_ops,
24250 .cable_detect = marvell_cable_detect,
24251 .prereset = marvell_pre_reset,
24252 diff -urNp linux-2.6.32.42/drivers/ata/pata_mpc52xx.c linux-2.6.32.42/drivers/ata/pata_mpc52xx.c
24253 --- linux-2.6.32.42/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
24254 +++ linux-2.6.32.42/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
24255 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
24256 ATA_PIO_SHT(DRV_NAME),
24257 };
24258
24259 -static struct ata_port_operations mpc52xx_ata_port_ops = {
24260 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
24261 .inherits = &ata_bmdma_port_ops,
24262 .sff_dev_select = mpc52xx_ata_dev_select,
24263 .set_piomode = mpc52xx_ata_set_piomode,
24264 diff -urNp linux-2.6.32.42/drivers/ata/pata_mpiix.c linux-2.6.32.42/drivers/ata/pata_mpiix.c
24265 --- linux-2.6.32.42/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
24266 +++ linux-2.6.32.42/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
24267 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
24268 ATA_PIO_SHT(DRV_NAME),
24269 };
24270
24271 -static struct ata_port_operations mpiix_port_ops = {
24272 +static const struct ata_port_operations mpiix_port_ops = {
24273 .inherits = &ata_sff_port_ops,
24274 .qc_issue = mpiix_qc_issue,
24275 .cable_detect = ata_cable_40wire,
24276 diff -urNp linux-2.6.32.42/drivers/ata/pata_netcell.c linux-2.6.32.42/drivers/ata/pata_netcell.c
24277 --- linux-2.6.32.42/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
24278 +++ linux-2.6.32.42/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
24279 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell
24280 ATA_BMDMA_SHT(DRV_NAME),
24281 };
24282
24283 -static struct ata_port_operations netcell_ops = {
24284 +static const struct ata_port_operations netcell_ops = {
24285 .inherits = &ata_bmdma_port_ops,
24286 .cable_detect = ata_cable_80wire,
24287 .read_id = netcell_read_id,
24288 diff -urNp linux-2.6.32.42/drivers/ata/pata_ninja32.c linux-2.6.32.42/drivers/ata/pata_ninja32.c
24289 --- linux-2.6.32.42/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
24290 +++ linux-2.6.32.42/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
24291 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
24292 ATA_BMDMA_SHT(DRV_NAME),
24293 };
24294
24295 -static struct ata_port_operations ninja32_port_ops = {
24296 +static const struct ata_port_operations ninja32_port_ops = {
24297 .inherits = &ata_bmdma_port_ops,
24298 .sff_dev_select = ninja32_dev_select,
24299 .cable_detect = ata_cable_40wire,
24300 diff -urNp linux-2.6.32.42/drivers/ata/pata_ns87410.c linux-2.6.32.42/drivers/ata/pata_ns87410.c
24301 --- linux-2.6.32.42/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
24302 +++ linux-2.6.32.42/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
24303 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
24304 ATA_PIO_SHT(DRV_NAME),
24305 };
24306
24307 -static struct ata_port_operations ns87410_port_ops = {
24308 +static const struct ata_port_operations ns87410_port_ops = {
24309 .inherits = &ata_sff_port_ops,
24310 .qc_issue = ns87410_qc_issue,
24311 .cable_detect = ata_cable_40wire,
24312 diff -urNp linux-2.6.32.42/drivers/ata/pata_ns87415.c linux-2.6.32.42/drivers/ata/pata_ns87415.c
24313 --- linux-2.6.32.42/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
24314 +++ linux-2.6.32.42/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
24315 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
24316 }
24317 #endif /* 87560 SuperIO Support */
24318
24319 -static struct ata_port_operations ns87415_pata_ops = {
24320 +static const struct ata_port_operations ns87415_pata_ops = {
24321 .inherits = &ata_bmdma_port_ops,
24322
24323 .check_atapi_dma = ns87415_check_atapi_dma,
24324 @@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
24325 };
24326
24327 #if defined(CONFIG_SUPERIO)
24328 -static struct ata_port_operations ns87560_pata_ops = {
24329 +static const struct ata_port_operations ns87560_pata_ops = {
24330 .inherits = &ns87415_pata_ops,
24331 .sff_tf_read = ns87560_tf_read,
24332 .sff_check_status = ns87560_check_status,
24333 diff -urNp linux-2.6.32.42/drivers/ata/pata_octeon_cf.c linux-2.6.32.42/drivers/ata/pata_octeon_cf.c
24334 --- linux-2.6.32.42/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
24335 +++ linux-2.6.32.42/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
24336 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
24337 return 0;
24338 }
24339
24340 +/* cannot be const */
24341 static struct ata_port_operations octeon_cf_ops = {
24342 .inherits = &ata_sff_port_ops,
24343 .check_atapi_dma = octeon_cf_check_atapi_dma,
24344 diff -urNp linux-2.6.32.42/drivers/ata/pata_oldpiix.c linux-2.6.32.42/drivers/ata/pata_oldpiix.c
24345 --- linux-2.6.32.42/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
24346 +++ linux-2.6.32.42/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
24347 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
24348 ATA_BMDMA_SHT(DRV_NAME),
24349 };
24350
24351 -static struct ata_port_operations oldpiix_pata_ops = {
24352 +static const struct ata_port_operations oldpiix_pata_ops = {
24353 .inherits = &ata_bmdma_port_ops,
24354 .qc_issue = oldpiix_qc_issue,
24355 .cable_detect = ata_cable_40wire,
24356 diff -urNp linux-2.6.32.42/drivers/ata/pata_opti.c linux-2.6.32.42/drivers/ata/pata_opti.c
24357 --- linux-2.6.32.42/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
24358 +++ linux-2.6.32.42/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
24359 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
24360 ATA_PIO_SHT(DRV_NAME),
24361 };
24362
24363 -static struct ata_port_operations opti_port_ops = {
24364 +static const struct ata_port_operations opti_port_ops = {
24365 .inherits = &ata_sff_port_ops,
24366 .cable_detect = ata_cable_40wire,
24367 .set_piomode = opti_set_piomode,
24368 diff -urNp linux-2.6.32.42/drivers/ata/pata_optidma.c linux-2.6.32.42/drivers/ata/pata_optidma.c
24369 --- linux-2.6.32.42/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
24370 +++ linux-2.6.32.42/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
24371 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma
24372 ATA_BMDMA_SHT(DRV_NAME),
24373 };
24374
24375 -static struct ata_port_operations optidma_port_ops = {
24376 +static const struct ata_port_operations optidma_port_ops = {
24377 .inherits = &ata_bmdma_port_ops,
24378 .cable_detect = ata_cable_40wire,
24379 .set_piomode = optidma_set_pio_mode,
24380 @@ -346,7 +346,7 @@ static struct ata_port_operations optidm
24381 .prereset = optidma_pre_reset,
24382 };
24383
24384 -static struct ata_port_operations optiplus_port_ops = {
24385 +static const struct ata_port_operations optiplus_port_ops = {
24386 .inherits = &optidma_port_ops,
24387 .set_piomode = optiplus_set_pio_mode,
24388 .set_dmamode = optiplus_set_dma_mode,
24389 diff -urNp linux-2.6.32.42/drivers/ata/pata_palmld.c linux-2.6.32.42/drivers/ata/pata_palmld.c
24390 --- linux-2.6.32.42/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
24391 +++ linux-2.6.32.42/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
24392 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
24393 ATA_PIO_SHT(DRV_NAME),
24394 };
24395
24396 -static struct ata_port_operations palmld_port_ops = {
24397 +static const struct ata_port_operations palmld_port_ops = {
24398 .inherits = &ata_sff_port_ops,
24399 .sff_data_xfer = ata_sff_data_xfer_noirq,
24400 .cable_detect = ata_cable_40wire,
24401 diff -urNp linux-2.6.32.42/drivers/ata/pata_pcmcia.c linux-2.6.32.42/drivers/ata/pata_pcmcia.c
24402 --- linux-2.6.32.42/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
24403 +++ linux-2.6.32.42/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
24404 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
24405 ATA_PIO_SHT(DRV_NAME),
24406 };
24407
24408 -static struct ata_port_operations pcmcia_port_ops = {
24409 +static const struct ata_port_operations pcmcia_port_ops = {
24410 .inherits = &ata_sff_port_ops,
24411 .sff_data_xfer = ata_sff_data_xfer_noirq,
24412 .cable_detect = ata_cable_40wire,
24413 .set_mode = pcmcia_set_mode,
24414 };
24415
24416 -static struct ata_port_operations pcmcia_8bit_port_ops = {
24417 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
24418 .inherits = &ata_sff_port_ops,
24419 .sff_data_xfer = ata_data_xfer_8bit,
24420 .cable_detect = ata_cable_40wire,
24421 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
24422 unsigned long io_base, ctl_base;
24423 void __iomem *io_addr, *ctl_addr;
24424 int n_ports = 1;
24425 - struct ata_port_operations *ops = &pcmcia_port_ops;
24426 + const struct ata_port_operations *ops = &pcmcia_port_ops;
24427
24428 info = kzalloc(sizeof(*info), GFP_KERNEL);
24429 if (info == NULL)
24430 diff -urNp linux-2.6.32.42/drivers/ata/pata_pdc2027x.c linux-2.6.32.42/drivers/ata/pata_pdc2027x.c
24431 --- linux-2.6.32.42/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
24432 +++ linux-2.6.32.42/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
24433 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
24434 ATA_BMDMA_SHT(DRV_NAME),
24435 };
24436
24437 -static struct ata_port_operations pdc2027x_pata100_ops = {
24438 +static const struct ata_port_operations pdc2027x_pata100_ops = {
24439 .inherits = &ata_bmdma_port_ops,
24440 .check_atapi_dma = pdc2027x_check_atapi_dma,
24441 .cable_detect = pdc2027x_cable_detect,
24442 .prereset = pdc2027x_prereset,
24443 };
24444
24445 -static struct ata_port_operations pdc2027x_pata133_ops = {
24446 +static const struct ata_port_operations pdc2027x_pata133_ops = {
24447 .inherits = &pdc2027x_pata100_ops,
24448 .mode_filter = pdc2027x_mode_filter,
24449 .set_piomode = pdc2027x_set_piomode,
24450 diff -urNp linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c
24451 --- linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
24452 +++ linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
24453 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
24454 ATA_BMDMA_SHT(DRV_NAME),
24455 };
24456
24457 -static struct ata_port_operations pdc2024x_port_ops = {
24458 +static const struct ata_port_operations pdc2024x_port_ops = {
24459 .inherits = &ata_bmdma_port_ops,
24460
24461 .cable_detect = ata_cable_40wire,
24462 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
24463 .sff_exec_command = pdc202xx_exec_command,
24464 };
24465
24466 -static struct ata_port_operations pdc2026x_port_ops = {
24467 +static const struct ata_port_operations pdc2026x_port_ops = {
24468 .inherits = &pdc2024x_port_ops,
24469
24470 .check_atapi_dma = pdc2026x_check_atapi_dma,
24471 diff -urNp linux-2.6.32.42/drivers/ata/pata_platform.c linux-2.6.32.42/drivers/ata/pata_platform.c
24472 --- linux-2.6.32.42/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
24473 +++ linux-2.6.32.42/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
24474 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
24475 ATA_PIO_SHT(DRV_NAME),
24476 };
24477
24478 -static struct ata_port_operations pata_platform_port_ops = {
24479 +static const struct ata_port_operations pata_platform_port_ops = {
24480 .inherits = &ata_sff_port_ops,
24481 .sff_data_xfer = ata_sff_data_xfer_noirq,
24482 .cable_detect = ata_cable_unknown,
24483 diff -urNp linux-2.6.32.42/drivers/ata/pata_qdi.c linux-2.6.32.42/drivers/ata/pata_qdi.c
24484 --- linux-2.6.32.42/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
24485 +++ linux-2.6.32.42/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
24486 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
24487 ATA_PIO_SHT(DRV_NAME),
24488 };
24489
24490 -static struct ata_port_operations qdi6500_port_ops = {
24491 +static const struct ata_port_operations qdi6500_port_ops = {
24492 .inherits = &ata_sff_port_ops,
24493 .qc_issue = qdi_qc_issue,
24494 .sff_data_xfer = qdi_data_xfer,
24495 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
24496 .set_piomode = qdi6500_set_piomode,
24497 };
24498
24499 -static struct ata_port_operations qdi6580_port_ops = {
24500 +static const struct ata_port_operations qdi6580_port_ops = {
24501 .inherits = &qdi6500_port_ops,
24502 .set_piomode = qdi6580_set_piomode,
24503 };
24504 diff -urNp linux-2.6.32.42/drivers/ata/pata_radisys.c linux-2.6.32.42/drivers/ata/pata_radisys.c
24505 --- linux-2.6.32.42/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
24506 +++ linux-2.6.32.42/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
24507 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys
24508 ATA_BMDMA_SHT(DRV_NAME),
24509 };
24510
24511 -static struct ata_port_operations radisys_pata_ops = {
24512 +static const struct ata_port_operations radisys_pata_ops = {
24513 .inherits = &ata_bmdma_port_ops,
24514 .qc_issue = radisys_qc_issue,
24515 .cable_detect = ata_cable_unknown,
24516 diff -urNp linux-2.6.32.42/drivers/ata/pata_rb532_cf.c linux-2.6.32.42/drivers/ata/pata_rb532_cf.c
24517 --- linux-2.6.32.42/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
24518 +++ linux-2.6.32.42/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
24519 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
24520 return IRQ_HANDLED;
24521 }
24522
24523 -static struct ata_port_operations rb532_pata_port_ops = {
24524 +static const struct ata_port_operations rb532_pata_port_ops = {
24525 .inherits = &ata_sff_port_ops,
24526 .sff_data_xfer = ata_sff_data_xfer32,
24527 };
24528 diff -urNp linux-2.6.32.42/drivers/ata/pata_rdc.c linux-2.6.32.42/drivers/ata/pata_rdc.c
24529 --- linux-2.6.32.42/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
24530 +++ linux-2.6.32.42/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
24531 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
24532 pci_write_config_byte(dev, 0x48, udma_enable);
24533 }
24534
24535 -static struct ata_port_operations rdc_pata_ops = {
24536 +static const struct ata_port_operations rdc_pata_ops = {
24537 .inherits = &ata_bmdma32_port_ops,
24538 .cable_detect = rdc_pata_cable_detect,
24539 .set_piomode = rdc_set_piomode,
24540 diff -urNp linux-2.6.32.42/drivers/ata/pata_rz1000.c linux-2.6.32.42/drivers/ata/pata_rz1000.c
24541 --- linux-2.6.32.42/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
24542 +++ linux-2.6.32.42/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
24543 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
24544 ATA_PIO_SHT(DRV_NAME),
24545 };
24546
24547 -static struct ata_port_operations rz1000_port_ops = {
24548 +static const struct ata_port_operations rz1000_port_ops = {
24549 .inherits = &ata_sff_port_ops,
24550 .cable_detect = ata_cable_40wire,
24551 .set_mode = rz1000_set_mode,
24552 diff -urNp linux-2.6.32.42/drivers/ata/pata_sc1200.c linux-2.6.32.42/drivers/ata/pata_sc1200.c
24553 --- linux-2.6.32.42/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
24554 +++ linux-2.6.32.42/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
24555 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
24556 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24557 };
24558
24559 -static struct ata_port_operations sc1200_port_ops = {
24560 +static const struct ata_port_operations sc1200_port_ops = {
24561 .inherits = &ata_bmdma_port_ops,
24562 .qc_prep = ata_sff_dumb_qc_prep,
24563 .qc_issue = sc1200_qc_issue,
24564 diff -urNp linux-2.6.32.42/drivers/ata/pata_scc.c linux-2.6.32.42/drivers/ata/pata_scc.c
24565 --- linux-2.6.32.42/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
24566 +++ linux-2.6.32.42/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
24567 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
24568 ATA_BMDMA_SHT(DRV_NAME),
24569 };
24570
24571 -static struct ata_port_operations scc_pata_ops = {
24572 +static const struct ata_port_operations scc_pata_ops = {
24573 .inherits = &ata_bmdma_port_ops,
24574
24575 .set_piomode = scc_set_piomode,
24576 diff -urNp linux-2.6.32.42/drivers/ata/pata_sch.c linux-2.6.32.42/drivers/ata/pata_sch.c
24577 --- linux-2.6.32.42/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
24578 +++ linux-2.6.32.42/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
24579 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
24580 ATA_BMDMA_SHT(DRV_NAME),
24581 };
24582
24583 -static struct ata_port_operations sch_pata_ops = {
24584 +static const struct ata_port_operations sch_pata_ops = {
24585 .inherits = &ata_bmdma_port_ops,
24586 .cable_detect = ata_cable_unknown,
24587 .set_piomode = sch_set_piomode,
24588 diff -urNp linux-2.6.32.42/drivers/ata/pata_serverworks.c linux-2.6.32.42/drivers/ata/pata_serverworks.c
24589 --- linux-2.6.32.42/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
24590 +++ linux-2.6.32.42/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
24591 @@ -299,7 +299,7 @@ static struct scsi_host_template serverw
24592 ATA_BMDMA_SHT(DRV_NAME),
24593 };
24594
24595 -static struct ata_port_operations serverworks_osb4_port_ops = {
24596 +static const struct ata_port_operations serverworks_osb4_port_ops = {
24597 .inherits = &ata_bmdma_port_ops,
24598 .cable_detect = serverworks_cable_detect,
24599 .mode_filter = serverworks_osb4_filter,
24600 @@ -307,7 +307,7 @@ static struct ata_port_operations server
24601 .set_dmamode = serverworks_set_dmamode,
24602 };
24603
24604 -static struct ata_port_operations serverworks_csb_port_ops = {
24605 +static const struct ata_port_operations serverworks_csb_port_ops = {
24606 .inherits = &serverworks_osb4_port_ops,
24607 .mode_filter = serverworks_csb_filter,
24608 };
24609 diff -urNp linux-2.6.32.42/drivers/ata/pata_sil680.c linux-2.6.32.42/drivers/ata/pata_sil680.c
24610 --- linux-2.6.32.42/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
24611 +++ linux-2.6.32.42/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
24612 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
24613 ATA_BMDMA_SHT(DRV_NAME),
24614 };
24615
24616 -static struct ata_port_operations sil680_port_ops = {
24617 +static const struct ata_port_operations sil680_port_ops = {
24618 .inherits = &ata_bmdma32_port_ops,
24619 .cable_detect = sil680_cable_detect,
24620 .set_piomode = sil680_set_piomode,
24621 diff -urNp linux-2.6.32.42/drivers/ata/pata_sis.c linux-2.6.32.42/drivers/ata/pata_sis.c
24622 --- linux-2.6.32.42/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
24623 +++ linux-2.6.32.42/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
24624 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
24625 ATA_BMDMA_SHT(DRV_NAME),
24626 };
24627
24628 -static struct ata_port_operations sis_133_for_sata_ops = {
24629 +static const struct ata_port_operations sis_133_for_sata_ops = {
24630 .inherits = &ata_bmdma_port_ops,
24631 .set_piomode = sis_133_set_piomode,
24632 .set_dmamode = sis_133_set_dmamode,
24633 .cable_detect = sis_133_cable_detect,
24634 };
24635
24636 -static struct ata_port_operations sis_base_ops = {
24637 +static const struct ata_port_operations sis_base_ops = {
24638 .inherits = &ata_bmdma_port_ops,
24639 .prereset = sis_pre_reset,
24640 };
24641
24642 -static struct ata_port_operations sis_133_ops = {
24643 +static const struct ata_port_operations sis_133_ops = {
24644 .inherits = &sis_base_ops,
24645 .set_piomode = sis_133_set_piomode,
24646 .set_dmamode = sis_133_set_dmamode,
24647 .cable_detect = sis_133_cable_detect,
24648 };
24649
24650 -static struct ata_port_operations sis_133_early_ops = {
24651 +static const struct ata_port_operations sis_133_early_ops = {
24652 .inherits = &sis_base_ops,
24653 .set_piomode = sis_100_set_piomode,
24654 .set_dmamode = sis_133_early_set_dmamode,
24655 .cable_detect = sis_66_cable_detect,
24656 };
24657
24658 -static struct ata_port_operations sis_100_ops = {
24659 +static const struct ata_port_operations sis_100_ops = {
24660 .inherits = &sis_base_ops,
24661 .set_piomode = sis_100_set_piomode,
24662 .set_dmamode = sis_100_set_dmamode,
24663 .cable_detect = sis_66_cable_detect,
24664 };
24665
24666 -static struct ata_port_operations sis_66_ops = {
24667 +static const struct ata_port_operations sis_66_ops = {
24668 .inherits = &sis_base_ops,
24669 .set_piomode = sis_old_set_piomode,
24670 .set_dmamode = sis_66_set_dmamode,
24671 .cable_detect = sis_66_cable_detect,
24672 };
24673
24674 -static struct ata_port_operations sis_old_ops = {
24675 +static const struct ata_port_operations sis_old_ops = {
24676 .inherits = &sis_base_ops,
24677 .set_piomode = sis_old_set_piomode,
24678 .set_dmamode = sis_old_set_dmamode,
24679 diff -urNp linux-2.6.32.42/drivers/ata/pata_sl82c105.c linux-2.6.32.42/drivers/ata/pata_sl82c105.c
24680 --- linux-2.6.32.42/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
24681 +++ linux-2.6.32.42/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
24682 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
24683 ATA_BMDMA_SHT(DRV_NAME),
24684 };
24685
24686 -static struct ata_port_operations sl82c105_port_ops = {
24687 +static const struct ata_port_operations sl82c105_port_ops = {
24688 .inherits = &ata_bmdma_port_ops,
24689 .qc_defer = sl82c105_qc_defer,
24690 .bmdma_start = sl82c105_bmdma_start,
24691 diff -urNp linux-2.6.32.42/drivers/ata/pata_triflex.c linux-2.6.32.42/drivers/ata/pata_triflex.c
24692 --- linux-2.6.32.42/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
24693 +++ linux-2.6.32.42/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
24694 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex
24695 ATA_BMDMA_SHT(DRV_NAME),
24696 };
24697
24698 -static struct ata_port_operations triflex_port_ops = {
24699 +static const struct ata_port_operations triflex_port_ops = {
24700 .inherits = &ata_bmdma_port_ops,
24701 .bmdma_start = triflex_bmdma_start,
24702 .bmdma_stop = triflex_bmdma_stop,
24703 diff -urNp linux-2.6.32.42/drivers/ata/pata_via.c linux-2.6.32.42/drivers/ata/pata_via.c
24704 --- linux-2.6.32.42/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
24705 +++ linux-2.6.32.42/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
24706 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
24707 ATA_BMDMA_SHT(DRV_NAME),
24708 };
24709
24710 -static struct ata_port_operations via_port_ops = {
24711 +static const struct ata_port_operations via_port_ops = {
24712 .inherits = &ata_bmdma_port_ops,
24713 .cable_detect = via_cable_detect,
24714 .set_piomode = via_set_piomode,
24715 @@ -429,7 +429,7 @@ static struct ata_port_operations via_po
24716 .port_start = via_port_start,
24717 };
24718
24719 -static struct ata_port_operations via_port_ops_noirq = {
24720 +static const struct ata_port_operations via_port_ops_noirq = {
24721 .inherits = &via_port_ops,
24722 .sff_data_xfer = ata_sff_data_xfer_noirq,
24723 };
24724 diff -urNp linux-2.6.32.42/drivers/ata/pata_winbond.c linux-2.6.32.42/drivers/ata/pata_winbond.c
24725 --- linux-2.6.32.42/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
24726 +++ linux-2.6.32.42/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
24727 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond
24728 ATA_PIO_SHT(DRV_NAME),
24729 };
24730
24731 -static struct ata_port_operations winbond_port_ops = {
24732 +static const struct ata_port_operations winbond_port_ops = {
24733 .inherits = &ata_sff_port_ops,
24734 .sff_data_xfer = winbond_data_xfer,
24735 .cable_detect = ata_cable_40wire,
24736 diff -urNp linux-2.6.32.42/drivers/ata/pdc_adma.c linux-2.6.32.42/drivers/ata/pdc_adma.c
24737 --- linux-2.6.32.42/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
24738 +++ linux-2.6.32.42/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
24739 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
24740 .dma_boundary = ADMA_DMA_BOUNDARY,
24741 };
24742
24743 -static struct ata_port_operations adma_ata_ops = {
24744 +static const struct ata_port_operations adma_ata_ops = {
24745 .inherits = &ata_sff_port_ops,
24746
24747 .lost_interrupt = ATA_OP_NULL,
24748 diff -urNp linux-2.6.32.42/drivers/ata/sata_fsl.c linux-2.6.32.42/drivers/ata/sata_fsl.c
24749 --- linux-2.6.32.42/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
24750 +++ linux-2.6.32.42/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
24751 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
24752 .dma_boundary = ATA_DMA_BOUNDARY,
24753 };
24754
24755 -static struct ata_port_operations sata_fsl_ops = {
24756 +static const struct ata_port_operations sata_fsl_ops = {
24757 .inherits = &sata_pmp_port_ops,
24758
24759 .qc_defer = ata_std_qc_defer,
24760 diff -urNp linux-2.6.32.42/drivers/ata/sata_inic162x.c linux-2.6.32.42/drivers/ata/sata_inic162x.c
24761 --- linux-2.6.32.42/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
24762 +++ linux-2.6.32.42/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
24763 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
24764 return 0;
24765 }
24766
24767 -static struct ata_port_operations inic_port_ops = {
24768 +static const struct ata_port_operations inic_port_ops = {
24769 .inherits = &sata_port_ops,
24770
24771 .check_atapi_dma = inic_check_atapi_dma,
24772 diff -urNp linux-2.6.32.42/drivers/ata/sata_mv.c linux-2.6.32.42/drivers/ata/sata_mv.c
24773 --- linux-2.6.32.42/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
24774 +++ linux-2.6.32.42/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
24775 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
24776 .dma_boundary = MV_DMA_BOUNDARY,
24777 };
24778
24779 -static struct ata_port_operations mv5_ops = {
24780 +static const struct ata_port_operations mv5_ops = {
24781 .inherits = &ata_sff_port_ops,
24782
24783 .lost_interrupt = ATA_OP_NULL,
24784 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
24785 .port_stop = mv_port_stop,
24786 };
24787
24788 -static struct ata_port_operations mv6_ops = {
24789 +static const struct ata_port_operations mv6_ops = {
24790 .inherits = &mv5_ops,
24791 .dev_config = mv6_dev_config,
24792 .scr_read = mv_scr_read,
24793 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
24794 .bmdma_status = mv_bmdma_status,
24795 };
24796
24797 -static struct ata_port_operations mv_iie_ops = {
24798 +static const struct ata_port_operations mv_iie_ops = {
24799 .inherits = &mv6_ops,
24800 .dev_config = ATA_OP_NULL,
24801 .qc_prep = mv_qc_prep_iie,
24802 diff -urNp linux-2.6.32.42/drivers/ata/sata_nv.c linux-2.6.32.42/drivers/ata/sata_nv.c
24803 --- linux-2.6.32.42/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
24804 +++ linux-2.6.32.42/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
24805 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
24806 * cases. Define nv_hardreset() which only kicks in for post-boot
24807 * probing and use it for all variants.
24808 */
24809 -static struct ata_port_operations nv_generic_ops = {
24810 +static const struct ata_port_operations nv_generic_ops = {
24811 .inherits = &ata_bmdma_port_ops,
24812 .lost_interrupt = ATA_OP_NULL,
24813 .scr_read = nv_scr_read,
24814 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
24815 .hardreset = nv_hardreset,
24816 };
24817
24818 -static struct ata_port_operations nv_nf2_ops = {
24819 +static const struct ata_port_operations nv_nf2_ops = {
24820 .inherits = &nv_generic_ops,
24821 .freeze = nv_nf2_freeze,
24822 .thaw = nv_nf2_thaw,
24823 };
24824
24825 -static struct ata_port_operations nv_ck804_ops = {
24826 +static const struct ata_port_operations nv_ck804_ops = {
24827 .inherits = &nv_generic_ops,
24828 .freeze = nv_ck804_freeze,
24829 .thaw = nv_ck804_thaw,
24830 .host_stop = nv_ck804_host_stop,
24831 };
24832
24833 -static struct ata_port_operations nv_adma_ops = {
24834 +static const struct ata_port_operations nv_adma_ops = {
24835 .inherits = &nv_ck804_ops,
24836
24837 .check_atapi_dma = nv_adma_check_atapi_dma,
24838 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
24839 .host_stop = nv_adma_host_stop,
24840 };
24841
24842 -static struct ata_port_operations nv_swncq_ops = {
24843 +static const struct ata_port_operations nv_swncq_ops = {
24844 .inherits = &nv_generic_ops,
24845
24846 .qc_defer = ata_std_qc_defer,
24847 diff -urNp linux-2.6.32.42/drivers/ata/sata_promise.c linux-2.6.32.42/drivers/ata/sata_promise.c
24848 --- linux-2.6.32.42/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
24849 +++ linux-2.6.32.42/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
24850 @@ -195,7 +195,7 @@ static const struct ata_port_operations
24851 .error_handler = pdc_error_handler,
24852 };
24853
24854 -static struct ata_port_operations pdc_sata_ops = {
24855 +static const struct ata_port_operations pdc_sata_ops = {
24856 .inherits = &pdc_common_ops,
24857 .cable_detect = pdc_sata_cable_detect,
24858 .freeze = pdc_sata_freeze,
24859 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
24860
24861 /* First-generation chips need a more restrictive ->check_atapi_dma op,
24862 and ->freeze/thaw that ignore the hotplug controls. */
24863 -static struct ata_port_operations pdc_old_sata_ops = {
24864 +static const struct ata_port_operations pdc_old_sata_ops = {
24865 .inherits = &pdc_sata_ops,
24866 .freeze = pdc_freeze,
24867 .thaw = pdc_thaw,
24868 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
24869 };
24870
24871 -static struct ata_port_operations pdc_pata_ops = {
24872 +static const struct ata_port_operations pdc_pata_ops = {
24873 .inherits = &pdc_common_ops,
24874 .cable_detect = pdc_pata_cable_detect,
24875 .freeze = pdc_freeze,
24876 diff -urNp linux-2.6.32.42/drivers/ata/sata_qstor.c linux-2.6.32.42/drivers/ata/sata_qstor.c
24877 --- linux-2.6.32.42/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
24878 +++ linux-2.6.32.42/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
24879 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
24880 .dma_boundary = QS_DMA_BOUNDARY,
24881 };
24882
24883 -static struct ata_port_operations qs_ata_ops = {
24884 +static const struct ata_port_operations qs_ata_ops = {
24885 .inherits = &ata_sff_port_ops,
24886
24887 .check_atapi_dma = qs_check_atapi_dma,
24888 diff -urNp linux-2.6.32.42/drivers/ata/sata_sil24.c linux-2.6.32.42/drivers/ata/sata_sil24.c
24889 --- linux-2.6.32.42/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
24890 +++ linux-2.6.32.42/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
24891 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
24892 .dma_boundary = ATA_DMA_BOUNDARY,
24893 };
24894
24895 -static struct ata_port_operations sil24_ops = {
24896 +static const struct ata_port_operations sil24_ops = {
24897 .inherits = &sata_pmp_port_ops,
24898
24899 .qc_defer = sil24_qc_defer,
24900 diff -urNp linux-2.6.32.42/drivers/ata/sata_sil.c linux-2.6.32.42/drivers/ata/sata_sil.c
24901 --- linux-2.6.32.42/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
24902 +++ linux-2.6.32.42/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
24903 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
24904 .sg_tablesize = ATA_MAX_PRD
24905 };
24906
24907 -static struct ata_port_operations sil_ops = {
24908 +static const struct ata_port_operations sil_ops = {
24909 .inherits = &ata_bmdma32_port_ops,
24910 .dev_config = sil_dev_config,
24911 .set_mode = sil_set_mode,
24912 diff -urNp linux-2.6.32.42/drivers/ata/sata_sis.c linux-2.6.32.42/drivers/ata/sata_sis.c
24913 --- linux-2.6.32.42/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
24914 +++ linux-2.6.32.42/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
24915 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
24916 ATA_BMDMA_SHT(DRV_NAME),
24917 };
24918
24919 -static struct ata_port_operations sis_ops = {
24920 +static const struct ata_port_operations sis_ops = {
24921 .inherits = &ata_bmdma_port_ops,
24922 .scr_read = sis_scr_read,
24923 .scr_write = sis_scr_write,
24924 diff -urNp linux-2.6.32.42/drivers/ata/sata_svw.c linux-2.6.32.42/drivers/ata/sata_svw.c
24925 --- linux-2.6.32.42/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
24926 +++ linux-2.6.32.42/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
24927 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
24928 };
24929
24930
24931 -static struct ata_port_operations k2_sata_ops = {
24932 +static const struct ata_port_operations k2_sata_ops = {
24933 .inherits = &ata_bmdma_port_ops,
24934 .sff_tf_load = k2_sata_tf_load,
24935 .sff_tf_read = k2_sata_tf_read,
24936 diff -urNp linux-2.6.32.42/drivers/ata/sata_sx4.c linux-2.6.32.42/drivers/ata/sata_sx4.c
24937 --- linux-2.6.32.42/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
24938 +++ linux-2.6.32.42/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
24939 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
24940 };
24941
24942 /* TODO: inherit from base port_ops after converting to new EH */
24943 -static struct ata_port_operations pdc_20621_ops = {
24944 +static const struct ata_port_operations pdc_20621_ops = {
24945 .inherits = &ata_sff_port_ops,
24946
24947 .check_atapi_dma = pdc_check_atapi_dma,
24948 diff -urNp linux-2.6.32.42/drivers/ata/sata_uli.c linux-2.6.32.42/drivers/ata/sata_uli.c
24949 --- linux-2.6.32.42/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
24950 +++ linux-2.6.32.42/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
24951 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
24952 ATA_BMDMA_SHT(DRV_NAME),
24953 };
24954
24955 -static struct ata_port_operations uli_ops = {
24956 +static const struct ata_port_operations uli_ops = {
24957 .inherits = &ata_bmdma_port_ops,
24958 .scr_read = uli_scr_read,
24959 .scr_write = uli_scr_write,
24960 diff -urNp linux-2.6.32.42/drivers/ata/sata_via.c linux-2.6.32.42/drivers/ata/sata_via.c
24961 --- linux-2.6.32.42/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
24962 +++ linux-2.6.32.42/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
24963 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
24964 ATA_BMDMA_SHT(DRV_NAME),
24965 };
24966
24967 -static struct ata_port_operations svia_base_ops = {
24968 +static const struct ata_port_operations svia_base_ops = {
24969 .inherits = &ata_bmdma_port_ops,
24970 .sff_tf_load = svia_tf_load,
24971 };
24972
24973 -static struct ata_port_operations vt6420_sata_ops = {
24974 +static const struct ata_port_operations vt6420_sata_ops = {
24975 .inherits = &svia_base_ops,
24976 .freeze = svia_noop_freeze,
24977 .prereset = vt6420_prereset,
24978 .bmdma_start = vt6420_bmdma_start,
24979 };
24980
24981 -static struct ata_port_operations vt6421_pata_ops = {
24982 +static const struct ata_port_operations vt6421_pata_ops = {
24983 .inherits = &svia_base_ops,
24984 .cable_detect = vt6421_pata_cable_detect,
24985 .set_piomode = vt6421_set_pio_mode,
24986 .set_dmamode = vt6421_set_dma_mode,
24987 };
24988
24989 -static struct ata_port_operations vt6421_sata_ops = {
24990 +static const struct ata_port_operations vt6421_sata_ops = {
24991 .inherits = &svia_base_ops,
24992 .scr_read = svia_scr_read,
24993 .scr_write = svia_scr_write,
24994 };
24995
24996 -static struct ata_port_operations vt8251_ops = {
24997 +static const struct ata_port_operations vt8251_ops = {
24998 .inherits = &svia_base_ops,
24999 .hardreset = sata_std_hardreset,
25000 .scr_read = vt8251_scr_read,
25001 diff -urNp linux-2.6.32.42/drivers/ata/sata_vsc.c linux-2.6.32.42/drivers/ata/sata_vsc.c
25002 --- linux-2.6.32.42/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
25003 +++ linux-2.6.32.42/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
25004 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
25005 };
25006
25007
25008 -static struct ata_port_operations vsc_sata_ops = {
25009 +static const struct ata_port_operations vsc_sata_ops = {
25010 .inherits = &ata_bmdma_port_ops,
25011 /* The IRQ handling is not quite standard SFF behaviour so we
25012 cannot use the default lost interrupt handler */
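
The hunks above close out a long run of identical one-line changes: each libata low-level driver's struct ata_port_operations table gains a const qualifier, so the method table lands in read-only memory and its function pointers cannot be rewritten at run time (the core interfaces are adjusted to accept const tables elsewhere in the patch). A minimal sketch of the resulting pattern, with an invented driver name; ata_bmdma_port_ops is the stock helper table most of the drivers above inherit from:

/* Illustrative only: a constified libata method table. "example_ops" is
 * a made-up name; .inherits points at the generic BMDMA operations and
 * driver-specific methods would be overridden next to it, exactly as in
 * the hunks above. */
static const struct ata_port_operations example_ops = {
	.inherits	= &ata_bmdma_port_ops,
};

Because nothing writes to these tables after initialization, the only per-driver change needed is the const keyword itself.
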
25013 diff -urNp linux-2.6.32.42/drivers/atm/adummy.c linux-2.6.32.42/drivers/atm/adummy.c
25014 --- linux-2.6.32.42/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
25015 +++ linux-2.6.32.42/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
25016 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
25017 vcc->pop(vcc, skb);
25018 else
25019 dev_kfree_skb_any(skb);
25020 - atomic_inc(&vcc->stats->tx);
25021 + atomic_inc_unchecked(&vcc->stats->tx);
25022
25023 return 0;
25024 }
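
From adummy.c onward, the drivers/atm/ hunks in this stretch make one substitution: atomic_inc()/atomic_add() on the VCC statistics counters become atomic_inc_unchecked()/atomic_add_unchecked(). The reason is PaX's REFCOUNT hardening, configured elsewhere in this patch: ordinary atomic_t operations gain overflow detection so that reference-count overflows can be caught, while counters that are allowed to wrap, such as the ATM tx/rx/error statistics, move to the parallel atomic_unchecked_t API (the stats structures themselves are retyped in the ATM header changes elsewhere in the patch). A rough sketch of how the two APIs relate; this is an approximation of the definitions the patch adds to the atomic headers, not a quote of them:

/* Approximate shape (illustrative). With the hardening disabled the
 * unchecked variants collapse onto the ordinary atomics; with it enabled,
 * atomic_t operations trap on signed overflow while atomic_unchecked_t
 * keeps silently wrapping, which is what pure statistics counters want. */
#ifndef CONFIG_PAX_REFCOUNT
typedef atomic_t atomic_unchecked_t;
#define atomic_inc_unchecked(v)		atomic_inc(v)
#define atomic_add_unchecked(i, v)	atomic_add((i), (v))
#define atomic_read_unchecked(v)	atomic_read(v)
#define atomic_set_unchecked(v, i)	atomic_set((v), (i))
#endif

The ATM driver hunks that follow are mostly instances of this one rule and change no driver logic.
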
25025 diff -urNp linux-2.6.32.42/drivers/atm/ambassador.c linux-2.6.32.42/drivers/atm/ambassador.c
25026 --- linux-2.6.32.42/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
25027 +++ linux-2.6.32.42/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
25028 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
25029 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25030
25031 // VC layer stats
25032 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25033 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25034
25035 // free the descriptor
25036 kfree (tx_descr);
25037 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
25038 dump_skb ("<<<", vc, skb);
25039
25040 // VC layer stats
25041 - atomic_inc(&atm_vcc->stats->rx);
25042 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25043 __net_timestamp(skb);
25044 // end of our responsability
25045 atm_vcc->push (atm_vcc, skb);
25046 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
25047 } else {
25048 PRINTK (KERN_INFO, "dropped over-size frame");
25049 // should we count this?
25050 - atomic_inc(&atm_vcc->stats->rx_drop);
25051 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25052 }
25053
25054 } else {
25055 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
25056 }
25057
25058 if (check_area (skb->data, skb->len)) {
25059 - atomic_inc(&atm_vcc->stats->tx_err);
25060 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25061 return -ENOMEM; // ?
25062 }
25063
25064 diff -urNp linux-2.6.32.42/drivers/atm/atmtcp.c linux-2.6.32.42/drivers/atm/atmtcp.c
25065 --- linux-2.6.32.42/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
25066 +++ linux-2.6.32.42/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
25067 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
25068 if (vcc->pop) vcc->pop(vcc,skb);
25069 else dev_kfree_skb(skb);
25070 if (dev_data) return 0;
25071 - atomic_inc(&vcc->stats->tx_err);
25072 + atomic_inc_unchecked(&vcc->stats->tx_err);
25073 return -ENOLINK;
25074 }
25075 size = skb->len+sizeof(struct atmtcp_hdr);
25076 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
25077 if (!new_skb) {
25078 if (vcc->pop) vcc->pop(vcc,skb);
25079 else dev_kfree_skb(skb);
25080 - atomic_inc(&vcc->stats->tx_err);
25081 + atomic_inc_unchecked(&vcc->stats->tx_err);
25082 return -ENOBUFS;
25083 }
25084 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25085 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
25086 if (vcc->pop) vcc->pop(vcc,skb);
25087 else dev_kfree_skb(skb);
25088 out_vcc->push(out_vcc,new_skb);
25089 - atomic_inc(&vcc->stats->tx);
25090 - atomic_inc(&out_vcc->stats->rx);
25091 + atomic_inc_unchecked(&vcc->stats->tx);
25092 + atomic_inc_unchecked(&out_vcc->stats->rx);
25093 return 0;
25094 }
25095
25096 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
25097 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25098 read_unlock(&vcc_sklist_lock);
25099 if (!out_vcc) {
25100 - atomic_inc(&vcc->stats->tx_err);
25101 + atomic_inc_unchecked(&vcc->stats->tx_err);
25102 goto done;
25103 }
25104 skb_pull(skb,sizeof(struct atmtcp_hdr));
25105 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
25106 __net_timestamp(new_skb);
25107 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25108 out_vcc->push(out_vcc,new_skb);
25109 - atomic_inc(&vcc->stats->tx);
25110 - atomic_inc(&out_vcc->stats->rx);
25111 + atomic_inc_unchecked(&vcc->stats->tx);
25112 + atomic_inc_unchecked(&out_vcc->stats->rx);
25113 done:
25114 if (vcc->pop) vcc->pop(vcc,skb);
25115 else dev_kfree_skb(skb);
25116 diff -urNp linux-2.6.32.42/drivers/atm/eni.c linux-2.6.32.42/drivers/atm/eni.c
25117 --- linux-2.6.32.42/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
25118 +++ linux-2.6.32.42/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
25119 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25120 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25121 vcc->dev->number);
25122 length = 0;
25123 - atomic_inc(&vcc->stats->rx_err);
25124 + atomic_inc_unchecked(&vcc->stats->rx_err);
25125 }
25126 else {
25127 length = ATM_CELL_SIZE-1; /* no HEC */
25128 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25129 size);
25130 }
25131 eff = length = 0;
25132 - atomic_inc(&vcc->stats->rx_err);
25133 + atomic_inc_unchecked(&vcc->stats->rx_err);
25134 }
25135 else {
25136 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25137 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25138 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25139 vcc->dev->number,vcc->vci,length,size << 2,descr);
25140 length = eff = 0;
25141 - atomic_inc(&vcc->stats->rx_err);
25142 + atomic_inc_unchecked(&vcc->stats->rx_err);
25143 }
25144 }
25145 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25146 @@ -770,7 +770,7 @@ rx_dequeued++;
25147 vcc->push(vcc,skb);
25148 pushed++;
25149 }
25150 - atomic_inc(&vcc->stats->rx);
25151 + atomic_inc_unchecked(&vcc->stats->rx);
25152 }
25153 wake_up(&eni_dev->rx_wait);
25154 }
25155 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
25156 PCI_DMA_TODEVICE);
25157 if (vcc->pop) vcc->pop(vcc,skb);
25158 else dev_kfree_skb_irq(skb);
25159 - atomic_inc(&vcc->stats->tx);
25160 + atomic_inc_unchecked(&vcc->stats->tx);
25161 wake_up(&eni_dev->tx_wait);
25162 dma_complete++;
25163 }
25164 diff -urNp linux-2.6.32.42/drivers/atm/firestream.c linux-2.6.32.42/drivers/atm/firestream.c
25165 --- linux-2.6.32.42/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
25166 +++ linux-2.6.32.42/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
25167 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct
25168 }
25169 }
25170
25171 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25172 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25173
25174 fs_dprintk (FS_DEBUG_TXMEM, "i");
25175 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25176 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_
25177 #endif
25178 skb_put (skb, qe->p1 & 0xffff);
25179 ATM_SKB(skb)->vcc = atm_vcc;
25180 - atomic_inc(&atm_vcc->stats->rx);
25181 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25182 __net_timestamp(skb);
25183 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25184 atm_vcc->push (atm_vcc, skb);
25185 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_
25186 kfree (pe);
25187 }
25188 if (atm_vcc)
25189 - atomic_inc(&atm_vcc->stats->rx_drop);
25190 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25191 break;
25192 case 0x1f: /* Reassembly abort: no buffers. */
25193 /* Silently increment error counter. */
25194 if (atm_vcc)
25195 - atomic_inc(&atm_vcc->stats->rx_drop);
25196 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25197 break;
25198 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25199 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25200 diff -urNp linux-2.6.32.42/drivers/atm/fore200e.c linux-2.6.32.42/drivers/atm/fore200e.c
25201 --- linux-2.6.32.42/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
25202 +++ linux-2.6.32.42/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
25203 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
25204 #endif
25205 /* check error condition */
25206 if (*entry->status & STATUS_ERROR)
25207 - atomic_inc(&vcc->stats->tx_err);
25208 + atomic_inc_unchecked(&vcc->stats->tx_err);
25209 else
25210 - atomic_inc(&vcc->stats->tx);
25211 + atomic_inc_unchecked(&vcc->stats->tx);
25212 }
25213 }
25214
25215 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
25216 if (skb == NULL) {
25217 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25218
25219 - atomic_inc(&vcc->stats->rx_drop);
25220 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25221 return -ENOMEM;
25222 }
25223
25224 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
25225
25226 dev_kfree_skb_any(skb);
25227
25228 - atomic_inc(&vcc->stats->rx_drop);
25229 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25230 return -ENOMEM;
25231 }
25232
25233 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25234
25235 vcc->push(vcc, skb);
25236 - atomic_inc(&vcc->stats->rx);
25237 + atomic_inc_unchecked(&vcc->stats->rx);
25238
25239 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25240
25241 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
25242 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25243 fore200e->atm_dev->number,
25244 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25245 - atomic_inc(&vcc->stats->rx_err);
25246 + atomic_inc_unchecked(&vcc->stats->rx_err);
25247 }
25248 }
25249
25250 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
25251 goto retry_here;
25252 }
25253
25254 - atomic_inc(&vcc->stats->tx_err);
25255 + atomic_inc_unchecked(&vcc->stats->tx_err);
25256
25257 fore200e->tx_sat++;
25258 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25259 diff -urNp linux-2.6.32.42/drivers/atm/he.c linux-2.6.32.42/drivers/atm/he.c
25260 --- linux-2.6.32.42/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
25261 +++ linux-2.6.32.42/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
25262 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25263
25264 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25265 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25266 - atomic_inc(&vcc->stats->rx_drop);
25267 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25268 goto return_host_buffers;
25269 }
25270
25271 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25272 RBRQ_LEN_ERR(he_dev->rbrq_head)
25273 ? "LEN_ERR" : "",
25274 vcc->vpi, vcc->vci);
25275 - atomic_inc(&vcc->stats->rx_err);
25276 + atomic_inc_unchecked(&vcc->stats->rx_err);
25277 goto return_host_buffers;
25278 }
25279
25280 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25281 vcc->push(vcc, skb);
25282 spin_lock(&he_dev->global_lock);
25283
25284 - atomic_inc(&vcc->stats->rx);
25285 + atomic_inc_unchecked(&vcc->stats->rx);
25286
25287 return_host_buffers:
25288 ++pdus_assembled;
25289 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
25290 tpd->vcc->pop(tpd->vcc, tpd->skb);
25291 else
25292 dev_kfree_skb_any(tpd->skb);
25293 - atomic_inc(&tpd->vcc->stats->tx_err);
25294 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25295 }
25296 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25297 return;
25298 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25299 vcc->pop(vcc, skb);
25300 else
25301 dev_kfree_skb_any(skb);
25302 - atomic_inc(&vcc->stats->tx_err);
25303 + atomic_inc_unchecked(&vcc->stats->tx_err);
25304 return -EINVAL;
25305 }
25306
25307 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25308 vcc->pop(vcc, skb);
25309 else
25310 dev_kfree_skb_any(skb);
25311 - atomic_inc(&vcc->stats->tx_err);
25312 + atomic_inc_unchecked(&vcc->stats->tx_err);
25313 return -EINVAL;
25314 }
25315 #endif
25316 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25317 vcc->pop(vcc, skb);
25318 else
25319 dev_kfree_skb_any(skb);
25320 - atomic_inc(&vcc->stats->tx_err);
25321 + atomic_inc_unchecked(&vcc->stats->tx_err);
25322 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25323 return -ENOMEM;
25324 }
25325 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25326 vcc->pop(vcc, skb);
25327 else
25328 dev_kfree_skb_any(skb);
25329 - atomic_inc(&vcc->stats->tx_err);
25330 + atomic_inc_unchecked(&vcc->stats->tx_err);
25331 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25332 return -ENOMEM;
25333 }
25334 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25335 __enqueue_tpd(he_dev, tpd, cid);
25336 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25337
25338 - atomic_inc(&vcc->stats->tx);
25339 + atomic_inc_unchecked(&vcc->stats->tx);
25340
25341 return 0;
25342 }
25343 diff -urNp linux-2.6.32.42/drivers/atm/horizon.c linux-2.6.32.42/drivers/atm/horizon.c
25344 --- linux-2.6.32.42/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
25345 +++ linux-2.6.32.42/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
25346 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
25347 {
25348 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25349 // VC layer stats
25350 - atomic_inc(&vcc->stats->rx);
25351 + atomic_inc_unchecked(&vcc->stats->rx);
25352 __net_timestamp(skb);
25353 // end of our responsability
25354 vcc->push (vcc, skb);
25355 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
25356 dev->tx_iovec = NULL;
25357
25358 // VC layer stats
25359 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25360 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25361
25362 // free the skb
25363 hrz_kfree_skb (skb);
25364 diff -urNp linux-2.6.32.42/drivers/atm/idt77252.c linux-2.6.32.42/drivers/atm/idt77252.c
25365 --- linux-2.6.32.42/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
25366 +++ linux-2.6.32.42/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
25367 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
25368 else
25369 dev_kfree_skb(skb);
25370
25371 - atomic_inc(&vcc->stats->tx);
25372 + atomic_inc_unchecked(&vcc->stats->tx);
25373 }
25374
25375 atomic_dec(&scq->used);
25376 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
25377 if ((sb = dev_alloc_skb(64)) == NULL) {
25378 printk("%s: Can't allocate buffers for aal0.\n",
25379 card->name);
25380 - atomic_add(i, &vcc->stats->rx_drop);
25381 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25382 break;
25383 }
25384 if (!atm_charge(vcc, sb->truesize)) {
25385 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25386 card->name);
25387 - atomic_add(i - 1, &vcc->stats->rx_drop);
25388 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25389 dev_kfree_skb(sb);
25390 break;
25391 }
25392 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
25393 ATM_SKB(sb)->vcc = vcc;
25394 __net_timestamp(sb);
25395 vcc->push(vcc, sb);
25396 - atomic_inc(&vcc->stats->rx);
25397 + atomic_inc_unchecked(&vcc->stats->rx);
25398
25399 cell += ATM_CELL_PAYLOAD;
25400 }
25401 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
25402 "(CDC: %08x)\n",
25403 card->name, len, rpp->len, readl(SAR_REG_CDC));
25404 recycle_rx_pool_skb(card, rpp);
25405 - atomic_inc(&vcc->stats->rx_err);
25406 + atomic_inc_unchecked(&vcc->stats->rx_err);
25407 return;
25408 }
25409 if (stat & SAR_RSQE_CRC) {
25410 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25411 recycle_rx_pool_skb(card, rpp);
25412 - atomic_inc(&vcc->stats->rx_err);
25413 + atomic_inc_unchecked(&vcc->stats->rx_err);
25414 return;
25415 }
25416 if (skb_queue_len(&rpp->queue) > 1) {
25417 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
25418 RXPRINTK("%s: Can't alloc RX skb.\n",
25419 card->name);
25420 recycle_rx_pool_skb(card, rpp);
25421 - atomic_inc(&vcc->stats->rx_err);
25422 + atomic_inc_unchecked(&vcc->stats->rx_err);
25423 return;
25424 }
25425 if (!atm_charge(vcc, skb->truesize)) {
25426 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
25427 __net_timestamp(skb);
25428
25429 vcc->push(vcc, skb);
25430 - atomic_inc(&vcc->stats->rx);
25431 + atomic_inc_unchecked(&vcc->stats->rx);
25432
25433 return;
25434 }
25435 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
25436 __net_timestamp(skb);
25437
25438 vcc->push(vcc, skb);
25439 - atomic_inc(&vcc->stats->rx);
25440 + atomic_inc_unchecked(&vcc->stats->rx);
25441
25442 if (skb->truesize > SAR_FB_SIZE_3)
25443 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25444 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
25445 if (vcc->qos.aal != ATM_AAL0) {
25446 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25447 card->name, vpi, vci);
25448 - atomic_inc(&vcc->stats->rx_drop);
25449 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25450 goto drop;
25451 }
25452
25453 if ((sb = dev_alloc_skb(64)) == NULL) {
25454 printk("%s: Can't allocate buffers for AAL0.\n",
25455 card->name);
25456 - atomic_inc(&vcc->stats->rx_err);
25457 + atomic_inc_unchecked(&vcc->stats->rx_err);
25458 goto drop;
25459 }
25460
25461 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
25462 ATM_SKB(sb)->vcc = vcc;
25463 __net_timestamp(sb);
25464 vcc->push(vcc, sb);
25465 - atomic_inc(&vcc->stats->rx);
25466 + atomic_inc_unchecked(&vcc->stats->rx);
25467
25468 drop:
25469 skb_pull(queue, 64);
25470 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25471
25472 if (vc == NULL) {
25473 printk("%s: NULL connection in send().\n", card->name);
25474 - atomic_inc(&vcc->stats->tx_err);
25475 + atomic_inc_unchecked(&vcc->stats->tx_err);
25476 dev_kfree_skb(skb);
25477 return -EINVAL;
25478 }
25479 if (!test_bit(VCF_TX, &vc->flags)) {
25480 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25481 - atomic_inc(&vcc->stats->tx_err);
25482 + atomic_inc_unchecked(&vcc->stats->tx_err);
25483 dev_kfree_skb(skb);
25484 return -EINVAL;
25485 }
25486 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25487 break;
25488 default:
25489 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25490 - atomic_inc(&vcc->stats->tx_err);
25491 + atomic_inc_unchecked(&vcc->stats->tx_err);
25492 dev_kfree_skb(skb);
25493 return -EINVAL;
25494 }
25495
25496 if (skb_shinfo(skb)->nr_frags != 0) {
25497 printk("%s: No scatter-gather yet.\n", card->name);
25498 - atomic_inc(&vcc->stats->tx_err);
25499 + atomic_inc_unchecked(&vcc->stats->tx_err);
25500 dev_kfree_skb(skb);
25501 return -EINVAL;
25502 }
25503 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25504
25505 err = queue_skb(card, vc, skb, oam);
25506 if (err) {
25507 - atomic_inc(&vcc->stats->tx_err);
25508 + atomic_inc_unchecked(&vcc->stats->tx_err);
25509 dev_kfree_skb(skb);
25510 return err;
25511 }
25512 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
25513 skb = dev_alloc_skb(64);
25514 if (!skb) {
25515 printk("%s: Out of memory in send_oam().\n", card->name);
25516 - atomic_inc(&vcc->stats->tx_err);
25517 + atomic_inc_unchecked(&vcc->stats->tx_err);
25518 return -ENOMEM;
25519 }
25520 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25521 diff -urNp linux-2.6.32.42/drivers/atm/iphase.c linux-2.6.32.42/drivers/atm/iphase.c
25522 --- linux-2.6.32.42/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
25523 +++ linux-2.6.32.42/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
25524 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
25525 status = (u_short) (buf_desc_ptr->desc_mode);
25526 if (status & (RX_CER | RX_PTE | RX_OFL))
25527 {
25528 - atomic_inc(&vcc->stats->rx_err);
25529 + atomic_inc_unchecked(&vcc->stats->rx_err);
25530 IF_ERR(printk("IA: bad packet, dropping it");)
25531 if (status & RX_CER) {
25532 IF_ERR(printk(" cause: packet CRC error\n");)
25533 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25534 len = dma_addr - buf_addr;
25535 if (len > iadev->rx_buf_sz) {
25536 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25537 - atomic_inc(&vcc->stats->rx_err);
25538 + atomic_inc_unchecked(&vcc->stats->rx_err);
25539 goto out_free_desc;
25540 }
25541
25542 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
25543 ia_vcc = INPH_IA_VCC(vcc);
25544 if (ia_vcc == NULL)
25545 {
25546 - atomic_inc(&vcc->stats->rx_err);
25547 + atomic_inc_unchecked(&vcc->stats->rx_err);
25548 dev_kfree_skb_any(skb);
25549 atm_return(vcc, atm_guess_pdu2truesize(len));
25550 goto INCR_DLE;
25551 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
25552 if ((length > iadev->rx_buf_sz) || (length >
25553 (skb->len - sizeof(struct cpcs_trailer))))
25554 {
25555 - atomic_inc(&vcc->stats->rx_err);
25556 + atomic_inc_unchecked(&vcc->stats->rx_err);
25557 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25558 length, skb->len);)
25559 dev_kfree_skb_any(skb);
25560 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
25561
25562 IF_RX(printk("rx_dle_intr: skb push");)
25563 vcc->push(vcc,skb);
25564 - atomic_inc(&vcc->stats->rx);
25565 + atomic_inc_unchecked(&vcc->stats->rx);
25566 iadev->rx_pkt_cnt++;
25567 }
25568 INCR_DLE:
25569 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
25570 {
25571 struct k_sonet_stats *stats;
25572 stats = &PRIV(_ia_dev[board])->sonet_stats;
25573 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25574 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25575 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25576 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25577 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25578 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25579 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25580 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25581 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25582 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25583 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25584 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25585 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25586 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25587 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25588 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25589 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25590 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25591 }
25592 ia_cmds.status = 0;
25593 break;
25594 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
25595 if ((desc == 0) || (desc > iadev->num_tx_desc))
25596 {
25597 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25598 - atomic_inc(&vcc->stats->tx);
25599 + atomic_inc_unchecked(&vcc->stats->tx);
25600 if (vcc->pop)
25601 vcc->pop(vcc, skb);
25602 else
25603 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
25604 ATM_DESC(skb) = vcc->vci;
25605 skb_queue_tail(&iadev->tx_dma_q, skb);
25606
25607 - atomic_inc(&vcc->stats->tx);
25608 + atomic_inc_unchecked(&vcc->stats->tx);
25609 iadev->tx_pkt_cnt++;
25610 /* Increment transaction counter */
25611 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25612
25613 #if 0
25614 /* add flow control logic */
25615 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25616 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25617 if (iavcc->vc_desc_cnt > 10) {
25618 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25619 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25620 diff -urNp linux-2.6.32.42/drivers/atm/lanai.c linux-2.6.32.42/drivers/atm/lanai.c
25621 --- linux-2.6.32.42/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
25622 +++ linux-2.6.32.42/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
25623 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
25624 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25625 lanai_endtx(lanai, lvcc);
25626 lanai_free_skb(lvcc->tx.atmvcc, skb);
25627 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25628 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25629 }
25630
25631 /* Try to fill the buffer - don't call unless there is backlog */
25632 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
25633 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25634 __net_timestamp(skb);
25635 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25636 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25637 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25638 out:
25639 lvcc->rx.buf.ptr = end;
25640 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25641 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
25642 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25643 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25644 lanai->stats.service_rxnotaal5++;
25645 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25646 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25647 return 0;
25648 }
25649 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25650 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
25651 int bytes;
25652 read_unlock(&vcc_sklist_lock);
25653 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25654 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25655 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25656 lvcc->stats.x.aal5.service_trash++;
25657 bytes = (SERVICE_GET_END(s) * 16) -
25658 (((unsigned long) lvcc->rx.buf.ptr) -
25659 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
25660 }
25661 if (s & SERVICE_STREAM) {
25662 read_unlock(&vcc_sklist_lock);
25663 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25664 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25665 lvcc->stats.x.aal5.service_stream++;
25666 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25667 "PDU on VCI %d!\n", lanai->number, vci);
25668 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
25669 return 0;
25670 }
25671 DPRINTK("got rx crc error on vci %d\n", vci);
25672 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25673 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25674 lvcc->stats.x.aal5.service_rxcrc++;
25675 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25676 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25677 diff -urNp linux-2.6.32.42/drivers/atm/nicstar.c linux-2.6.32.42/drivers/atm/nicstar.c
25678 --- linux-2.6.32.42/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
25679 +++ linux-2.6.32.42/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
25680 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
25681 if ((vc = (vc_map *) vcc->dev_data) == NULL)
25682 {
25683 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
25684 - atomic_inc(&vcc->stats->tx_err);
25685 + atomic_inc_unchecked(&vcc->stats->tx_err);
25686 dev_kfree_skb_any(skb);
25687 return -EINVAL;
25688 }
25689 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
25690 if (!vc->tx)
25691 {
25692 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
25693 - atomic_inc(&vcc->stats->tx_err);
25694 + atomic_inc_unchecked(&vcc->stats->tx_err);
25695 dev_kfree_skb_any(skb);
25696 return -EINVAL;
25697 }
25698 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
25699 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
25700 {
25701 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
25702 - atomic_inc(&vcc->stats->tx_err);
25703 + atomic_inc_unchecked(&vcc->stats->tx_err);
25704 dev_kfree_skb_any(skb);
25705 return -EINVAL;
25706 }
25707 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
25708 if (skb_shinfo(skb)->nr_frags != 0)
25709 {
25710 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25711 - atomic_inc(&vcc->stats->tx_err);
25712 + atomic_inc_unchecked(&vcc->stats->tx_err);
25713 dev_kfree_skb_any(skb);
25714 return -EINVAL;
25715 }
25716 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
25717
25718 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
25719 {
25720 - atomic_inc(&vcc->stats->tx_err);
25721 + atomic_inc_unchecked(&vcc->stats->tx_err);
25722 dev_kfree_skb_any(skb);
25723 return -EIO;
25724 }
25725 - atomic_inc(&vcc->stats->tx);
25726 + atomic_inc_unchecked(&vcc->stats->tx);
25727
25728 return 0;
25729 }
25730 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
25731 {
25732 printk("nicstar%d: Can't allocate buffers for aal0.\n",
25733 card->index);
25734 - atomic_add(i,&vcc->stats->rx_drop);
25735 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
25736 break;
25737 }
25738 if (!atm_charge(vcc, sb->truesize))
25739 {
25740 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
25741 card->index);
25742 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
25743 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
25744 dev_kfree_skb_any(sb);
25745 break;
25746 }
25747 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
25748 ATM_SKB(sb)->vcc = vcc;
25749 __net_timestamp(sb);
25750 vcc->push(vcc, sb);
25751 - atomic_inc(&vcc->stats->rx);
25752 + atomic_inc_unchecked(&vcc->stats->rx);
25753 cell += ATM_CELL_PAYLOAD;
25754 }
25755
25756 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
25757 if (iovb == NULL)
25758 {
25759 printk("nicstar%d: Out of iovec buffers.\n", card->index);
25760 - atomic_inc(&vcc->stats->rx_drop);
25761 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25762 recycle_rx_buf(card, skb);
25763 return;
25764 }
25765 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
25766 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
25767 {
25768 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
25769 - atomic_inc(&vcc->stats->rx_err);
25770 + atomic_inc_unchecked(&vcc->stats->rx_err);
25771 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
25772 NS_SKB(iovb)->iovcnt = 0;
25773 iovb->len = 0;
25774 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
25775 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
25776 card->index);
25777 which_list(card, skb);
25778 - atomic_inc(&vcc->stats->rx_err);
25779 + atomic_inc_unchecked(&vcc->stats->rx_err);
25780 recycle_rx_buf(card, skb);
25781 vc->rx_iov = NULL;
25782 recycle_iov_buf(card, iovb);
25783 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
25784 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
25785 card->index);
25786 which_list(card, skb);
25787 - atomic_inc(&vcc->stats->rx_err);
25788 + atomic_inc_unchecked(&vcc->stats->rx_err);
25789 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25790 NS_SKB(iovb)->iovcnt);
25791 vc->rx_iov = NULL;
25792 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
25793 printk(" - PDU size mismatch.\n");
25794 else
25795 printk(".\n");
25796 - atomic_inc(&vcc->stats->rx_err);
25797 + atomic_inc_unchecked(&vcc->stats->rx_err);
25798 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25799 NS_SKB(iovb)->iovcnt);
25800 vc->rx_iov = NULL;
25801 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
25802 if (!atm_charge(vcc, skb->truesize))
25803 {
25804 push_rxbufs(card, skb);
25805 - atomic_inc(&vcc->stats->rx_drop);
25806 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25807 }
25808 else
25809 {
25810 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
25811 ATM_SKB(skb)->vcc = vcc;
25812 __net_timestamp(skb);
25813 vcc->push(vcc, skb);
25814 - atomic_inc(&vcc->stats->rx);
25815 + atomic_inc_unchecked(&vcc->stats->rx);
25816 }
25817 }
25818 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
25819 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
25820 if (!atm_charge(vcc, sb->truesize))
25821 {
25822 push_rxbufs(card, sb);
25823 - atomic_inc(&vcc->stats->rx_drop);
25824 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25825 }
25826 else
25827 {
25828 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
25829 ATM_SKB(sb)->vcc = vcc;
25830 __net_timestamp(sb);
25831 vcc->push(vcc, sb);
25832 - atomic_inc(&vcc->stats->rx);
25833 + atomic_inc_unchecked(&vcc->stats->rx);
25834 }
25835
25836 push_rxbufs(card, skb);
25837 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
25838 if (!atm_charge(vcc, skb->truesize))
25839 {
25840 push_rxbufs(card, skb);
25841 - atomic_inc(&vcc->stats->rx_drop);
25842 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25843 }
25844 else
25845 {
25846 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
25847 ATM_SKB(skb)->vcc = vcc;
25848 __net_timestamp(skb);
25849 vcc->push(vcc, skb);
25850 - atomic_inc(&vcc->stats->rx);
25851 + atomic_inc_unchecked(&vcc->stats->rx);
25852 }
25853
25854 push_rxbufs(card, sb);
25855 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
25856 if (hb == NULL)
25857 {
25858 printk("nicstar%d: Out of huge buffers.\n", card->index);
25859 - atomic_inc(&vcc->stats->rx_drop);
25860 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25861 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25862 NS_SKB(iovb)->iovcnt);
25863 vc->rx_iov = NULL;
25864 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
25865 }
25866 else
25867 dev_kfree_skb_any(hb);
25868 - atomic_inc(&vcc->stats->rx_drop);
25869 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25870 }
25871 else
25872 {
25873 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
25874 #endif /* NS_USE_DESTRUCTORS */
25875 __net_timestamp(hb);
25876 vcc->push(vcc, hb);
25877 - atomic_inc(&vcc->stats->rx);
25878 + atomic_inc_unchecked(&vcc->stats->rx);
25879 }
25880 }
25881
25882 diff -urNp linux-2.6.32.42/drivers/atm/solos-pci.c linux-2.6.32.42/drivers/atm/solos-pci.c
25883 --- linux-2.6.32.42/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
25884 +++ linux-2.6.32.42/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
25885 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
25886 }
25887 atm_charge(vcc, skb->truesize);
25888 vcc->push(vcc, skb);
25889 - atomic_inc(&vcc->stats->rx);
25890 + atomic_inc_unchecked(&vcc->stats->rx);
25891 break;
25892
25893 case PKT_STATUS:
25894 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
25895 char msg[500];
25896 char item[10];
25897
25898 + pax_track_stack();
25899 +
25900 len = buf->len;
25901 for (i = 0; i < len; i++){
25902 if(i % 8 == 0)
25903 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
25904 vcc = SKB_CB(oldskb)->vcc;
25905
25906 if (vcc) {
25907 - atomic_inc(&vcc->stats->tx);
25908 + atomic_inc_unchecked(&vcc->stats->tx);
25909 solos_pop(vcc, oldskb);
25910 } else
25911 dev_kfree_skb_irq(oldskb);
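
Besides the stats conversion, the solos-pci hunk drops a pax_track_stack() call into print_buffer(), which builds a roughly 500-byte message on the stack; the same call is added further down to cpqarray's do_ida_request(), DAC960's device-configuration scan and nbd's sock_xmit()/__nbd_ioctl(). pax_track_stack() belongs to the stack-usage tracking side of PaX (defined elsewhere in this patch, not shown here): functions with unusually large frames record how deep they reach so the used portion of the kernel stack can be cleared again afterwards. Only the usage pattern is sketched below; every name other than pax_track_stack() is invented:

/* Usage pattern (illustrative): a function with a large on-stack buffer
 * announces itself to the stack-depth tracker before doing real work. */
static void example_dump(const unsigned char *data, size_t len)
{
	char msg[500];			/* large frame, worth tracking */

	pax_track_stack();		/* record the deep stack usage */

	snprintf(msg, sizeof(msg), "%zu bytes at %p\n", len, data);
	printk(KERN_DEBUG "%s", msg);
}
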
25912 diff -urNp linux-2.6.32.42/drivers/atm/suni.c linux-2.6.32.42/drivers/atm/suni.c
25913 --- linux-2.6.32.42/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
25914 +++ linux-2.6.32.42/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
25915 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
25916
25917
25918 #define ADD_LIMITED(s,v) \
25919 - atomic_add((v),&stats->s); \
25920 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
25921 + atomic_add_unchecked((v),&stats->s); \
25922 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
25923
25924
25925 static void suni_hz(unsigned long from_timer)
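
suni.c's ADD_LIMITED macro (and the nearly identical one in uPD98402.c just below) accumulates SONET error counters and clamps them at INT_MAX once they would go negative; deliberate saturation like this is exactly what the REFCOUNT overflow checks would flag, so the whole macro moves to the unchecked primitives. Expanded by hand for one field, it now reads:

/* ADD_LIMITED(section_bip, v) after the patch, with the macro arguments
 * substituted manually; "stats" is the per-device SONET statistics
 * pointer used by suni_hz() above. */
atomic_add_unchecked(v, &stats->section_bip);
if (atomic_read_unchecked(&stats->section_bip) < 0)
	atomic_set_unchecked(&stats->section_bip, INT_MAX);
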
25926 diff -urNp linux-2.6.32.42/drivers/atm/uPD98402.c linux-2.6.32.42/drivers/atm/uPD98402.c
25927 --- linux-2.6.32.42/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
25928 +++ linux-2.6.32.42/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
25929 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
25930 struct sonet_stats tmp;
25931 int error = 0;
25932
25933 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
25934 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
25935 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
25936 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
25937 if (zero && !error) {
25938 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
25939
25940
25941 #define ADD_LIMITED(s,v) \
25942 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
25943 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
25944 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
25945 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
25946 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
25947 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
25948
25949
25950 static void stat_event(struct atm_dev *dev)
25951 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
25952 if (reason & uPD98402_INT_PFM) stat_event(dev);
25953 if (reason & uPD98402_INT_PCO) {
25954 (void) GET(PCOCR); /* clear interrupt cause */
25955 - atomic_add(GET(HECCT),
25956 + atomic_add_unchecked(GET(HECCT),
25957 &PRIV(dev)->sonet_stats.uncorr_hcs);
25958 }
25959 if ((reason & uPD98402_INT_RFO) &&
25960 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
25961 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
25962 uPD98402_INT_LOS),PIMR); /* enable them */
25963 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
25964 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
25965 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
25966 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
25967 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
25968 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
25969 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
25970 return 0;
25971 }
25972
25973 diff -urNp linux-2.6.32.42/drivers/atm/zatm.c linux-2.6.32.42/drivers/atm/zatm.c
25974 --- linux-2.6.32.42/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
25975 +++ linux-2.6.32.42/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
25976 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
25977 }
25978 if (!size) {
25979 dev_kfree_skb_irq(skb);
25980 - if (vcc) atomic_inc(&vcc->stats->rx_err);
25981 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
25982 continue;
25983 }
25984 if (!atm_charge(vcc,skb->truesize)) {
25985 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
25986 skb->len = size;
25987 ATM_SKB(skb)->vcc = vcc;
25988 vcc->push(vcc,skb);
25989 - atomic_inc(&vcc->stats->rx);
25990 + atomic_inc_unchecked(&vcc->stats->rx);
25991 }
25992 zout(pos & 0xffff,MTA(mbx));
25993 #if 0 /* probably a stupid idea */
25994 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
25995 skb_queue_head(&zatm_vcc->backlog,skb);
25996 break;
25997 }
25998 - atomic_inc(&vcc->stats->tx);
25999 + atomic_inc_unchecked(&vcc->stats->tx);
26000 wake_up(&zatm_vcc->tx_wait);
26001 }
26002
26003 diff -urNp linux-2.6.32.42/drivers/base/bus.c linux-2.6.32.42/drivers/base/bus.c
26004 --- linux-2.6.32.42/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
26005 +++ linux-2.6.32.42/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
26006 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
26007 return ret;
26008 }
26009
26010 -static struct sysfs_ops driver_sysfs_ops = {
26011 +static const struct sysfs_ops driver_sysfs_ops = {
26012 .show = drv_attr_show,
26013 .store = drv_attr_store,
26014 };
26015 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
26016 return ret;
26017 }
26018
26019 -static struct sysfs_ops bus_sysfs_ops = {
26020 +static const struct sysfs_ops bus_sysfs_ops = {
26021 .show = bus_attr_show,
26022 .store = bus_attr_store,
26023 };
26024 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
26025 return 0;
26026 }
26027
26028 -static struct kset_uevent_ops bus_uevent_ops = {
26029 +static const struct kset_uevent_ops bus_uevent_ops = {
26030 .filter = bus_uevent_filter,
26031 };
26032
26033 diff -urNp linux-2.6.32.42/drivers/base/class.c linux-2.6.32.42/drivers/base/class.c
26034 --- linux-2.6.32.42/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
26035 +++ linux-2.6.32.42/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
26036 @@ -63,7 +63,7 @@ static void class_release(struct kobject
26037 kfree(cp);
26038 }
26039
26040 -static struct sysfs_ops class_sysfs_ops = {
26041 +static const struct sysfs_ops class_sysfs_ops = {
26042 .show = class_attr_show,
26043 .store = class_attr_store,
26044 };
26045 diff -urNp linux-2.6.32.42/drivers/base/core.c linux-2.6.32.42/drivers/base/core.c
26046 --- linux-2.6.32.42/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
26047 +++ linux-2.6.32.42/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
26048 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
26049 return ret;
26050 }
26051
26052 -static struct sysfs_ops dev_sysfs_ops = {
26053 +static const struct sysfs_ops dev_sysfs_ops = {
26054 .show = dev_attr_show,
26055 .store = dev_attr_store,
26056 };
26057 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
26058 return retval;
26059 }
26060
26061 -static struct kset_uevent_ops device_uevent_ops = {
26062 +static const struct kset_uevent_ops device_uevent_ops = {
26063 .filter = dev_uevent_filter,
26064 .name = dev_uevent_name,
26065 .uevent = dev_uevent,
26066 diff -urNp linux-2.6.32.42/drivers/base/memory.c linux-2.6.32.42/drivers/base/memory.c
26067 --- linux-2.6.32.42/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
26068 +++ linux-2.6.32.42/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
26069 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
26070 return retval;
26071 }
26072
26073 -static struct kset_uevent_ops memory_uevent_ops = {
26074 +static const struct kset_uevent_ops memory_uevent_ops = {
26075 .name = memory_uevent_name,
26076 .uevent = memory_uevent,
26077 };
26078 diff -urNp linux-2.6.32.42/drivers/base/sys.c linux-2.6.32.42/drivers/base/sys.c
26079 --- linux-2.6.32.42/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
26080 +++ linux-2.6.32.42/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
26081 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
26082 return -EIO;
26083 }
26084
26085 -static struct sysfs_ops sysfs_ops = {
26086 +static const struct sysfs_ops sysfs_ops = {
26087 .show = sysdev_show,
26088 .store = sysdev_store,
26089 };
26090 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
26091 return -EIO;
26092 }
26093
26094 -static struct sysfs_ops sysfs_class_ops = {
26095 +static const struct sysfs_ops sysfs_class_ops = {
26096 .show = sysdev_class_show,
26097 .store = sysdev_class_store,
26098 };
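
The drivers/base hunks (and the pktcdvd one further down) are the sysfs flavour of the same constification: sysfs_ops and kset_uevent_ops instances contain nothing but function pointers and are never modified after being defined, so they become const and move out of writable memory; the kobject/sysfs core is adjusted to take const pointers elsewhere in the patch. Sketched with invented names, using the show/store signatures these files already use:

/* Illustrative constified sysfs_ops table (example_* names are made up). */
static ssize_t example_show(struct kobject *kobj, struct attribute *attr,
			    char *buf)
{
	return sprintf(buf, "example\n");
}

static ssize_t example_store(struct kobject *kobj, struct attribute *attr,
			     const char *buf, size_t count)
{
	return count;			/* accept and ignore the write */
}

static const struct sysfs_ops example_sysfs_ops = {
	.show	= example_show,
	.store	= example_store,
};
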
26099 diff -urNp linux-2.6.32.42/drivers/block/cciss.c linux-2.6.32.42/drivers/block/cciss.c
26100 --- linux-2.6.32.42/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
26101 +++ linux-2.6.32.42/drivers/block/cciss.c 2011-04-17 15:56:46.000000000 -0400
26102 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
26103 int err;
26104 u32 cp;
26105
26106 + memset(&arg64, 0, sizeof(arg64));
26107 +
26108 err = 0;
26109 err |=
26110 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
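
The cciss change zeroes the on-stack 64-bit passthrough structure before it is assembled field by field from the 32-bit compat layout; the genrtc and hpet hunks further down apply the same memset to structures that are subsequently copied out to user space. Without the zeroing, structure padding and any member a partial path leaves unset would carry stale kernel stack bytes through copy_to_user(). The generic shape of the fix, with invented names:

/* Infoleak-avoidance pattern (illustrative): clear the whole reply
 * struct first, then fill only the fields meant to be exposed. */
struct example_reply {			/* invented for illustration */
	u32 version;
	u32 flags;
};

static long example_get_info(void __user *argp)
{
	struct example_reply reply;

	memset(&reply, 0, sizeof(reply));	/* the added line */
	reply.version = 1;

	if (copy_to_user(argp, &reply, sizeof(reply)))
		return -EFAULT;
	return 0;
}
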
26111 diff -urNp linux-2.6.32.42/drivers/block/cpqarray.c linux-2.6.32.42/drivers/block/cpqarray.c
26112 --- linux-2.6.32.42/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
26113 +++ linux-2.6.32.42/drivers/block/cpqarray.c 2011-05-16 21:46:57.000000000 -0400
26114 @@ -896,6 +896,8 @@ static void do_ida_request(struct reques
26115 struct scatterlist tmp_sg[SG_MAX];
26116 int i, dir, seg;
26117
26118 + pax_track_stack();
26119 +
26120 if (blk_queue_plugged(q))
26121 goto startio;
26122
26123 diff -urNp linux-2.6.32.42/drivers/block/DAC960.c linux-2.6.32.42/drivers/block/DAC960.c
26124 --- linux-2.6.32.42/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
26125 +++ linux-2.6.32.42/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
26126 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
26127 unsigned long flags;
26128 int Channel, TargetID;
26129
26130 + pax_track_stack();
26131 +
26132 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
26133 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
26134 sizeof(DAC960_SCSI_Inquiry_T) +
26135 diff -urNp linux-2.6.32.42/drivers/block/nbd.c linux-2.6.32.42/drivers/block/nbd.c
26136 --- linux-2.6.32.42/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
26137 +++ linux-2.6.32.42/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
26138 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
26139 struct kvec iov;
26140 sigset_t blocked, oldset;
26141
26142 + pax_track_stack();
26143 +
26144 if (unlikely(!sock)) {
26145 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
26146 lo->disk->disk_name, (send ? "send" : "recv"));
26147 @@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
26148 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
26149 unsigned int cmd, unsigned long arg)
26150 {
26151 + pax_track_stack();
26152 +
26153 switch (cmd) {
26154 case NBD_DISCONNECT: {
26155 struct request sreq;
26156 diff -urNp linux-2.6.32.42/drivers/block/pktcdvd.c linux-2.6.32.42/drivers/block/pktcdvd.c
26157 --- linux-2.6.32.42/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
26158 +++ linux-2.6.32.42/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
26159 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
26160 return len;
26161 }
26162
26163 -static struct sysfs_ops kobj_pkt_ops = {
26164 +static const struct sysfs_ops kobj_pkt_ops = {
26165 .show = kobj_pkt_show,
26166 .store = kobj_pkt_store
26167 };
26168 diff -urNp linux-2.6.32.42/drivers/char/agp/frontend.c linux-2.6.32.42/drivers/char/agp/frontend.c
26169 --- linux-2.6.32.42/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
26170 +++ linux-2.6.32.42/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
26171 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
26172 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26173 return -EFAULT;
26174
26175 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26176 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26177 return -EFAULT;
26178
26179 client = agp_find_client_by_pid(reserve.pid);
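
The agp frontend change swaps the struct used in the seg_count sanity check: the user-supplied count is now bounded by ~0U / sizeof(struct agp_segment_priv), presumably because the same count is later multiplied by the size of the larger in-kernel agp_segment_priv objects when the segment list is built, and the old bound left room for that multiplication to overflow. The general overflow-safe sizing idiom, with invented names:

/* Overflow-safe "count * sizeof(elem)" allocation (illustrative): the
 * bound must use the size of the object actually being multiplied. */
struct example_elem { void *owner; unsigned long key; };

static struct example_elem *example_alloc(unsigned int count)
{
	if (count == 0 || count > ULONG_MAX / sizeof(struct example_elem))
		return NULL;		/* multiplication would overflow */
	return kzalloc(count * sizeof(struct example_elem), GFP_KERNEL);
}
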
26180 diff -urNp linux-2.6.32.42/drivers/char/briq_panel.c linux-2.6.32.42/drivers/char/briq_panel.c
26181 --- linux-2.6.32.42/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
26182 +++ linux-2.6.32.42/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
26183 @@ -10,6 +10,7 @@
26184 #include <linux/types.h>
26185 #include <linux/errno.h>
26186 #include <linux/tty.h>
26187 +#include <linux/mutex.h>
26188 #include <linux/timer.h>
26189 #include <linux/kernel.h>
26190 #include <linux/wait.h>
26191 @@ -36,6 +37,7 @@ static int vfd_is_open;
26192 static unsigned char vfd[40];
26193 static int vfd_cursor;
26194 static unsigned char ledpb, led;
26195 +static DEFINE_MUTEX(vfd_mutex);
26196
26197 static void update_vfd(void)
26198 {
26199 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
26200 if (!vfd_is_open)
26201 return -EBUSY;
26202
26203 + mutex_lock(&vfd_mutex);
26204 for (;;) {
26205 char c;
26206 if (!indx)
26207 break;
26208 - if (get_user(c, buf))
26209 + if (get_user(c, buf)) {
26210 + mutex_unlock(&vfd_mutex);
26211 return -EFAULT;
26212 + }
26213 if (esc) {
26214 set_led(c);
26215 esc = 0;
26216 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
26217 buf++;
26218 }
26219 update_vfd();
26220 + mutex_unlock(&vfd_mutex);
26221
26222 return len;
26223 }
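
briq_panel_write() updates shared state (the vfd[] buffer, vfd_cursor and the LED byte) with no locking, so two concurrent writers could interleave their updates; the hunk adds a file-scope DEFINE_MUTEX and holds it across the whole write loop, with the get_user() failure path now unlocking before returning -EFAULT. Reduced to its essentials, with invented names:

/* Minimal sketch of the serialization added above. */
static DEFINE_MUTEX(example_mutex);
static int example_state;

static ssize_t example_write(const char __user *buf, size_t len)
{
	size_t n = len;
	char c;

	mutex_lock(&example_mutex);
	while (n--) {
		if (get_user(c, buf)) {
			mutex_unlock(&example_mutex);	/* unlock on error */
			return -EFAULT;
		}
		example_state += c;	/* stand-in for the vfd/led updates */
		buf++;
	}
	mutex_unlock(&example_mutex);
	return len;
}
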
26224 diff -urNp linux-2.6.32.42/drivers/char/genrtc.c linux-2.6.32.42/drivers/char/genrtc.c
26225 --- linux-2.6.32.42/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
26226 +++ linux-2.6.32.42/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
26227 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
26228 switch (cmd) {
26229
26230 case RTC_PLL_GET:
26231 + memset(&pll, 0, sizeof(pll));
26232 if (get_rtc_pll(&pll))
26233 return -EINVAL;
26234 else
26235 diff -urNp linux-2.6.32.42/drivers/char/hpet.c linux-2.6.32.42/drivers/char/hpet.c
26236 --- linux-2.6.32.42/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
26237 +++ linux-2.6.32.42/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
26238 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
26239 return 0;
26240 }
26241
26242 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
26243 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
26244
26245 static int
26246 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
26247 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
26248 }
26249
26250 static int
26251 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
26252 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
26253 {
26254 struct hpet_timer __iomem *timer;
26255 struct hpet __iomem *hpet;
26256 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
26257 {
26258 struct hpet_info info;
26259
26260 + memset(&info, 0, sizeof(info));
26261 +
26262 if (devp->hd_ireqfreq)
26263 info.hi_ireqfreq =
26264 hpet_time_div(hpetp, devp->hd_ireqfreq);
26265 - else
26266 - info.hi_ireqfreq = 0;
26267 info.hi_flags =
26268 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
26269 info.hi_hpet = hpetp->hp_which;
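
The hpet hunk makes two small fixes in one place: hpet_ioctl_common()'s cmd parameter is widened from int to unsigned int, matching the unsigned cmd the ioctl entry point already receives (ioctl numbers built with _IOR() carry the read-direction bit in bit 31, so they do not fit a signed int cleanly), and the hpet_info reply is now zeroed up front instead of relying on an else branch to clear a single field, the same infoleak pattern as in the cciss and genrtc hunks above. A tiny illustration of the signedness point, with invented names:

/* Why "unsigned int cmd" (illustrative): _IOR()-encoded commands set
 * bit 31, so their values exceed INT_MAX. Needs <linux/ioctl.h>. */
struct example_reply { unsigned long value; };	/* invented */
#define EXAMPLE_IOC_GET	_IOR('x', 1, struct example_reply)

static int example_ioctl_common(unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case EXAMPLE_IOC_GET:
		return 0;
	default:
		return -ENOTTY;
	}
}
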
26270 diff -urNp linux-2.6.32.42/drivers/char/hvc_beat.c linux-2.6.32.42/drivers/char/hvc_beat.c
26271 --- linux-2.6.32.42/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
26272 +++ linux-2.6.32.42/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
26273 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
26274 return cnt;
26275 }
26276
26277 -static struct hv_ops hvc_beat_get_put_ops = {
26278 +static const struct hv_ops hvc_beat_get_put_ops = {
26279 .get_chars = hvc_beat_get_chars,
26280 .put_chars = hvc_beat_put_chars,
26281 };
26282 diff -urNp linux-2.6.32.42/drivers/char/hvc_console.c linux-2.6.32.42/drivers/char/hvc_console.c
26283 --- linux-2.6.32.42/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
26284 +++ linux-2.6.32.42/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
26285 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
26286 * console interfaces but can still be used as a tty device. This has to be
26287 * static because kmalloc will not work during early console init.
26288 */
26289 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26290 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26291 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
26292 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
26293
26294 @@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
26295 * vty adapters do NOT get an hvc_instantiate() callback since they
26296 * appear after early console init.
26297 */
26298 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
26299 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
26300 {
26301 struct hvc_struct *hp;
26302
26303 @@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
26304 };
26305
26306 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
26307 - struct hv_ops *ops, int outbuf_size)
26308 + const struct hv_ops *ops, int outbuf_size)
26309 {
26310 struct hvc_struct *hp;
26311 int i;
26312 diff -urNp linux-2.6.32.42/drivers/char/hvc_console.h linux-2.6.32.42/drivers/char/hvc_console.h
26313 --- linux-2.6.32.42/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
26314 +++ linux-2.6.32.42/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
26315 @@ -55,7 +55,7 @@ struct hvc_struct {
26316 int outbuf_size;
26317 int n_outbuf;
26318 uint32_t vtermno;
26319 - struct hv_ops *ops;
26320 + const struct hv_ops *ops;
26321 int irq_requested;
26322 int data;
26323 struct winsize ws;
26324 @@ -76,11 +76,11 @@ struct hv_ops {
26325 };
26326
26327 /* Register a vterm and a slot index for use as a console (console_init) */
26328 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
26329 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
26330
26331 /* register a vterm for hvc tty operation (module_init or hotplug add) */
26332 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
26333 - struct hv_ops *ops, int outbuf_size);
26334 + const struct hv_ops *ops, int outbuf_size);
26335 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
26336 extern int hvc_remove(struct hvc_struct *hp);
26337
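
The hvc_console changes thread const through the hypervisor-console backend interface: hvc_struct now stores a const hv_ops pointer, and both registration entry points, hvc_instantiate() for the early console slot and hvc_alloc() for the tty instance, take const struct hv_ops *. The beat/iSeries/iucv/RTAS backends that follow only add const to their method tables. A sketch of a backend registering against the constified interface; the names are invented and the get/put prototypes follow the backends patched in this file set:

/* Illustrative hvc backend using the constified registration above. */
static int example_get_chars(uint32_t vtermno, char *buf, int count)
{
	return 0;			/* nothing buffered */
}

static int example_put_chars(uint32_t vtermno, const char *buf, int count)
{
	return count;			/* pretend everything was sent */
}

static const struct hv_ops example_hv_ops = {
	.get_chars = example_get_chars,
	.put_chars = example_put_chars,
};

static int __init example_console_init(void)
{
	return hvc_instantiate(0, 0, &example_hv_ops);	/* vterm 0, slot 0 (illustrative) */
}
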
26338 diff -urNp linux-2.6.32.42/drivers/char/hvc_iseries.c linux-2.6.32.42/drivers/char/hvc_iseries.c
26339 --- linux-2.6.32.42/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
26340 +++ linux-2.6.32.42/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
26341 @@ -197,7 +197,7 @@ done:
26342 return sent;
26343 }
26344
26345 -static struct hv_ops hvc_get_put_ops = {
26346 +static const struct hv_ops hvc_get_put_ops = {
26347 .get_chars = get_chars,
26348 .put_chars = put_chars,
26349 .notifier_add = notifier_add_irq,
26350 diff -urNp linux-2.6.32.42/drivers/char/hvc_iucv.c linux-2.6.32.42/drivers/char/hvc_iucv.c
26351 --- linux-2.6.32.42/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
26352 +++ linux-2.6.32.42/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
26353 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
26354
26355
26356 /* HVC operations */
26357 -static struct hv_ops hvc_iucv_ops = {
26358 +static const struct hv_ops hvc_iucv_ops = {
26359 .get_chars = hvc_iucv_get_chars,
26360 .put_chars = hvc_iucv_put_chars,
26361 .notifier_add = hvc_iucv_notifier_add,
26362 diff -urNp linux-2.6.32.42/drivers/char/hvc_rtas.c linux-2.6.32.42/drivers/char/hvc_rtas.c
26363 --- linux-2.6.32.42/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
26364 +++ linux-2.6.32.42/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
26365 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
26366 return i;
26367 }
26368
26369 -static struct hv_ops hvc_rtas_get_put_ops = {
26370 +static const struct hv_ops hvc_rtas_get_put_ops = {
26371 .get_chars = hvc_rtas_read_console,
26372 .put_chars = hvc_rtas_write_console,
26373 };
26374 diff -urNp linux-2.6.32.42/drivers/char/hvcs.c linux-2.6.32.42/drivers/char/hvcs.c
26375 --- linux-2.6.32.42/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
26376 +++ linux-2.6.32.42/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
26377 @@ -82,6 +82,7 @@
26378 #include <asm/hvcserver.h>
26379 #include <asm/uaccess.h>
26380 #include <asm/vio.h>
26381 +#include <asm/local.h>
26382
26383 /*
26384 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
26385 @@ -269,7 +270,7 @@ struct hvcs_struct {
26386 unsigned int index;
26387
26388 struct tty_struct *tty;
26389 - int open_count;
26390 + local_t open_count;
26391
26392 /*
26393 * Used to tell the driver kernel_thread what operations need to take
26394 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
26395
26396 spin_lock_irqsave(&hvcsd->lock, flags);
26397
26398 - if (hvcsd->open_count > 0) {
26399 + if (local_read(&hvcsd->open_count) > 0) {
26400 spin_unlock_irqrestore(&hvcsd->lock, flags);
26401 printk(KERN_INFO "HVCS: vterm state unchanged. "
26402 "The hvcs device node is still in use.\n");
26403 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
26404 if ((retval = hvcs_partner_connect(hvcsd)))
26405 goto error_release;
26406
26407 - hvcsd->open_count = 1;
26408 + local_set(&hvcsd->open_count, 1);
26409 hvcsd->tty = tty;
26410 tty->driver_data = hvcsd;
26411
26412 @@ -1169,7 +1170,7 @@ fast_open:
26413
26414 spin_lock_irqsave(&hvcsd->lock, flags);
26415 kref_get(&hvcsd->kref);
26416 - hvcsd->open_count++;
26417 + local_inc(&hvcsd->open_count);
26418 hvcsd->todo_mask |= HVCS_SCHED_READ;
26419 spin_unlock_irqrestore(&hvcsd->lock, flags);
26420
26421 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
26422 hvcsd = tty->driver_data;
26423
26424 spin_lock_irqsave(&hvcsd->lock, flags);
26425 - if (--hvcsd->open_count == 0) {
26426 + if (local_dec_and_test(&hvcsd->open_count)) {
26427
26428 vio_disable_interrupts(hvcsd->vdev);
26429
26430 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
26431 free_irq(irq, hvcsd);
26432 kref_put(&hvcsd->kref, destroy_hvcs_struct);
26433 return;
26434 - } else if (hvcsd->open_count < 0) {
26435 + } else if (local_read(&hvcsd->open_count) < 0) {
26436 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
26437 " is missmanaged.\n",
26438 - hvcsd->vdev->unit_address, hvcsd->open_count);
26439 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
26440 }
26441
26442 spin_unlock_irqrestore(&hvcsd->lock, flags);
26443 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
26444
26445 spin_lock_irqsave(&hvcsd->lock, flags);
26446 /* Preserve this so that we know how many kref refs to put */
26447 - temp_open_count = hvcsd->open_count;
26448 + temp_open_count = local_read(&hvcsd->open_count);
26449
26450 /*
26451 * Don't kref put inside the spinlock because the destruction
26452 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
26453 hvcsd->tty->driver_data = NULL;
26454 hvcsd->tty = NULL;
26455
26456 - hvcsd->open_count = 0;
26457 + local_set(&hvcsd->open_count, 0);
26458
26459 /* This will drop any buffered data on the floor which is OK in a hangup
26460 * scenario. */
26461 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
26462 * the middle of a write operation? This is a crummy place to do this
26463 * but we want to keep it all in the spinlock.
26464 */
26465 - if (hvcsd->open_count <= 0) {
26466 + if (local_read(&hvcsd->open_count) <= 0) {
26467 spin_unlock_irqrestore(&hvcsd->lock, flags);
26468 return -ENODEV;
26469 }
26470 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
26471 {
26472 struct hvcs_struct *hvcsd = tty->driver_data;
26473
26474 - if (!hvcsd || hvcsd->open_count <= 0)
26475 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
26476 return 0;
26477
26478 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
26479 diff -urNp linux-2.6.32.42/drivers/char/hvc_udbg.c linux-2.6.32.42/drivers/char/hvc_udbg.c
26480 --- linux-2.6.32.42/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
26481 +++ linux-2.6.32.42/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
26482 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
26483 return i;
26484 }
26485
26486 -static struct hv_ops hvc_udbg_ops = {
26487 +static const struct hv_ops hvc_udbg_ops = {
26488 .get_chars = hvc_udbg_get,
26489 .put_chars = hvc_udbg_put,
26490 };
26491 diff -urNp linux-2.6.32.42/drivers/char/hvc_vio.c linux-2.6.32.42/drivers/char/hvc_vio.c
26492 --- linux-2.6.32.42/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
26493 +++ linux-2.6.32.42/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
26494 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
26495 return got;
26496 }
26497
26498 -static struct hv_ops hvc_get_put_ops = {
26499 +static const struct hv_ops hvc_get_put_ops = {
26500 .get_chars = filtered_get_chars,
26501 .put_chars = hvc_put_chars,
26502 .notifier_add = notifier_add_irq,
26503 diff -urNp linux-2.6.32.42/drivers/char/hvc_xen.c linux-2.6.32.42/drivers/char/hvc_xen.c
26504 --- linux-2.6.32.42/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
26505 +++ linux-2.6.32.42/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
26506 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
26507 return recv;
26508 }
26509
26510 -static struct hv_ops hvc_ops = {
26511 +static const struct hv_ops hvc_ops = {
26512 .get_chars = read_console,
26513 .put_chars = write_console,
26514 .notifier_add = notifier_add_irq,
26515 diff -urNp linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c
26516 --- linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
26517 +++ linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
26518 @@ -414,7 +414,7 @@ struct ipmi_smi {
26519 struct proc_dir_entry *proc_dir;
26520 char proc_dir_name[10];
26521
26522 - atomic_t stats[IPMI_NUM_STATS];
26523 + atomic_unchecked_t stats[IPMI_NUM_STATS];
26524
26525 /*
26526 * run_to_completion duplicate of smb_info, smi_info
26527 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26528
26529
26530 #define ipmi_inc_stat(intf, stat) \
26531 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26532 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26533 #define ipmi_get_stat(intf, stat) \
26534 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26535 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26536
26537 static int is_lan_addr(struct ipmi_addr *addr)
26538 {
26539 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
26540 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26541 init_waitqueue_head(&intf->waitq);
26542 for (i = 0; i < IPMI_NUM_STATS; i++)
26543 - atomic_set(&intf->stats[i], 0);
26544 + atomic_set_unchecked(&intf->stats[i], 0);
26545
26546 intf->proc_dir = NULL;
26547
26548 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
26549 struct ipmi_smi_msg smi_msg;
26550 struct ipmi_recv_msg recv_msg;
26551
26552 + pax_track_stack();
26553 +
26554 si = (struct ipmi_system_interface_addr *) &addr;
26555 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
26556 si->channel = IPMI_BMC_CHANNEL;
26557 diff -urNp linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c
26558 --- linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
26559 +++ linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
26560 @@ -277,7 +277,7 @@ struct smi_info {
26561 unsigned char slave_addr;
26562
26563 /* Counters and things for the proc filesystem. */
26564 - atomic_t stats[SI_NUM_STATS];
26565 + atomic_unchecked_t stats[SI_NUM_STATS];
26566
26567 struct task_struct *thread;
26568
26569 @@ -285,9 +285,9 @@ struct smi_info {
26570 };
26571
26572 #define smi_inc_stat(smi, stat) \
26573 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
26574 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
26575 #define smi_get_stat(smi, stat) \
26576 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
26577 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
26578
26579 #define SI_MAX_PARMS 4
26580
26581 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
26582 atomic_set(&new_smi->req_events, 0);
26583 new_smi->run_to_completion = 0;
26584 for (i = 0; i < SI_NUM_STATS; i++)
26585 - atomic_set(&new_smi->stats[i], 0);
26586 + atomic_set_unchecked(&new_smi->stats[i], 0);
26587
26588 new_smi->interrupt_disabled = 0;
26589 atomic_set(&new_smi->stop_operation, 0);
26590 diff -urNp linux-2.6.32.42/drivers/char/istallion.c linux-2.6.32.42/drivers/char/istallion.c
26591 --- linux-2.6.32.42/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
26592 +++ linux-2.6.32.42/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
26593 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
26594 * re-used for each stats call.
26595 */
26596 static comstats_t stli_comstats;
26597 -static combrd_t stli_brdstats;
26598 static struct asystats stli_cdkstats;
26599
26600 /*****************************************************************************/
26601 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
26602 {
26603 struct stlibrd *brdp;
26604 unsigned int i;
26605 + combrd_t stli_brdstats;
26606
26607 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
26608 return -EFAULT;
26609 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
26610 struct stliport stli_dummyport;
26611 struct stliport *portp;
26612
26613 + pax_track_stack();
26614 +
26615 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
26616 return -EFAULT;
26617 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
26618 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
26619 struct stlibrd stli_dummybrd;
26620 struct stlibrd *brdp;
26621
26622 + pax_track_stack();
26623 +
26624 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
26625 return -EFAULT;
26626 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
26627 diff -urNp linux-2.6.32.42/drivers/char/Kconfig linux-2.6.32.42/drivers/char/Kconfig
26628 --- linux-2.6.32.42/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
26629 +++ linux-2.6.32.42/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
26630 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
26631
26632 config DEVKMEM
26633 bool "/dev/kmem virtual device support"
26634 - default y
26635 + default n
26636 + depends on !GRKERNSEC_KMEM
26637 help
26638 Say Y here if you want to support the /dev/kmem device. The
26639 /dev/kmem device is rarely used, but can be used for certain
26640 @@ -1114,6 +1115,7 @@ config DEVPORT
26641 bool
26642 depends on !M68K
26643 depends on ISA || PCI
26644 + depends on !GRKERNSEC_KMEM
26645 default y
26646
26647 source "drivers/s390/char/Kconfig"
26648 diff -urNp linux-2.6.32.42/drivers/char/keyboard.c linux-2.6.32.42/drivers/char/keyboard.c
26649 --- linux-2.6.32.42/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
26650 +++ linux-2.6.32.42/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
26651 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
26652 kbd->kbdmode == VC_MEDIUMRAW) &&
26653 value != KVAL(K_SAK))
26654 return; /* SAK is allowed even in raw mode */
26655 +
26656 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
26657 + {
26658 + void *func = fn_handler[value];
26659 + if (func == fn_show_state || func == fn_show_ptregs ||
26660 + func == fn_show_mem)
26661 + return;
26662 + }
26663 +#endif
26664 +
26665 fn_handler[value](vc);
26666 }
26667
26668 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
26669 .evbit = { BIT_MASK(EV_SND) },
26670 },
26671
26672 - { }, /* Terminating entry */
26673 + { 0 }, /* Terminating entry */
26674 };
26675
26676 MODULE_DEVICE_TABLE(input, kbd_ids);
26677 diff -urNp linux-2.6.32.42/drivers/char/mem.c linux-2.6.32.42/drivers/char/mem.c
26678 --- linux-2.6.32.42/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
26679 +++ linux-2.6.32.42/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
26680 @@ -18,6 +18,7 @@
26681 #include <linux/raw.h>
26682 #include <linux/tty.h>
26683 #include <linux/capability.h>
26684 +#include <linux/security.h>
26685 #include <linux/ptrace.h>
26686 #include <linux/device.h>
26687 #include <linux/highmem.h>
26688 @@ -35,6 +36,10 @@
26689 # include <linux/efi.h>
26690 #endif
26691
26692 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26693 +extern struct file_operations grsec_fops;
26694 +#endif
26695 +
26696 static inline unsigned long size_inside_page(unsigned long start,
26697 unsigned long size)
26698 {
26699 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
26700
26701 while (cursor < to) {
26702 if (!devmem_is_allowed(pfn)) {
26703 +#ifdef CONFIG_GRKERNSEC_KMEM
26704 + gr_handle_mem_readwrite(from, to);
26705 +#else
26706 printk(KERN_INFO
26707 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
26708 current->comm, from, to);
26709 +#endif
26710 return 0;
26711 }
26712 cursor += PAGE_SIZE;
26713 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
26714 }
26715 return 1;
26716 }
26717 +#elif defined(CONFIG_GRKERNSEC_KMEM)
26718 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26719 +{
26720 + return 0;
26721 +}
26722 #else
26723 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26724 {
26725 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
26726 #endif
26727
26728 while (count > 0) {
26729 + char *temp;
26730 +
26731 /*
26732 * Handle first page in case it's not aligned
26733 */
26734 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
26735 if (!ptr)
26736 return -EFAULT;
26737
26738 - if (copy_to_user(buf, ptr, sz)) {
26739 +#ifdef CONFIG_PAX_USERCOPY
26740 + temp = kmalloc(sz, GFP_KERNEL);
26741 + if (!temp) {
26742 + unxlate_dev_mem_ptr(p, ptr);
26743 + return -ENOMEM;
26744 + }
26745 + memcpy(temp, ptr, sz);
26746 +#else
26747 + temp = ptr;
26748 +#endif
26749 +
26750 + if (copy_to_user(buf, temp, sz)) {
26751 +
26752 +#ifdef CONFIG_PAX_USERCOPY
26753 + kfree(temp);
26754 +#endif
26755 +
26756 unxlate_dev_mem_ptr(p, ptr);
26757 return -EFAULT;
26758 }
26759
26760 +#ifdef CONFIG_PAX_USERCOPY
26761 + kfree(temp);
26762 +#endif
26763 +
26764 unxlate_dev_mem_ptr(p, ptr);
26765
26766 buf += sz;
26767 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
26768 size_t count, loff_t *ppos)
26769 {
26770 unsigned long p = *ppos;
26771 - ssize_t low_count, read, sz;
26772 + ssize_t low_count, read, sz, err = 0;
26773 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
26774 - int err = 0;
26775
26776 read = 0;
26777 if (p < (unsigned long) high_memory) {
26778 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
26779 }
26780 #endif
26781 while (low_count > 0) {
26782 + char *temp;
26783 +
26784 sz = size_inside_page(p, low_count);
26785
26786 /*
26787 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
26788 */
26789 kbuf = xlate_dev_kmem_ptr((char *)p);
26790
26791 - if (copy_to_user(buf, kbuf, sz))
26792 +#ifdef CONFIG_PAX_USERCOPY
26793 + temp = kmalloc(sz, GFP_KERNEL);
26794 + if (!temp)
26795 + return -ENOMEM;
26796 + memcpy(temp, kbuf, sz);
26797 +#else
26798 + temp = kbuf;
26799 +#endif
26800 +
26801 + err = copy_to_user(buf, temp, sz);
26802 +
26803 +#ifdef CONFIG_PAX_USERCOPY
26804 + kfree(temp);
26805 +#endif
26806 +
26807 + if (err)
26808 return -EFAULT;
26809 buf += sz;
26810 p += sz;
26811 @@ -889,6 +941,9 @@ static const struct memdev {
26812 #ifdef CONFIG_CRASH_DUMP
26813 [12] = { "oldmem", 0, &oldmem_fops, NULL },
26814 #endif
26815 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26816 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
26817 +#endif
26818 };
26819
26820 static int memory_open(struct inode *inode, struct file *filp)
26821 diff -urNp linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c
26822 --- linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
26823 +++ linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
26824 @@ -29,6 +29,7 @@
26825 #include <linux/tty_driver.h>
26826 #include <linux/tty_flip.h>
26827 #include <linux/uaccess.h>
26828 +#include <asm/local.h>
26829
26830 #include "tty.h"
26831 #include "network.h"
26832 @@ -51,7 +52,7 @@ struct ipw_tty {
26833 int tty_type;
26834 struct ipw_network *network;
26835 struct tty_struct *linux_tty;
26836 - int open_count;
26837 + local_t open_count;
26838 unsigned int control_lines;
26839 struct mutex ipw_tty_mutex;
26840 int tx_bytes_queued;
26841 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
26842 mutex_unlock(&tty->ipw_tty_mutex);
26843 return -ENODEV;
26844 }
26845 - if (tty->open_count == 0)
26846 + if (local_read(&tty->open_count) == 0)
26847 tty->tx_bytes_queued = 0;
26848
26849 - tty->open_count++;
26850 + local_inc(&tty->open_count);
26851
26852 tty->linux_tty = linux_tty;
26853 linux_tty->driver_data = tty;
26854 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
26855
26856 static void do_ipw_close(struct ipw_tty *tty)
26857 {
26858 - tty->open_count--;
26859 -
26860 - if (tty->open_count == 0) {
26861 + if (local_dec_return(&tty->open_count) == 0) {
26862 struct tty_struct *linux_tty = tty->linux_tty;
26863
26864 if (linux_tty != NULL) {
26865 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
26866 return;
26867
26868 mutex_lock(&tty->ipw_tty_mutex);
26869 - if (tty->open_count == 0) {
26870 + if (local_read(&tty->open_count) == 0) {
26871 mutex_unlock(&tty->ipw_tty_mutex);
26872 return;
26873 }
26874 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
26875 return;
26876 }
26877
26878 - if (!tty->open_count) {
26879 + if (!local_read(&tty->open_count)) {
26880 mutex_unlock(&tty->ipw_tty_mutex);
26881 return;
26882 }
26883 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
26884 return -ENODEV;
26885
26886 mutex_lock(&tty->ipw_tty_mutex);
26887 - if (!tty->open_count) {
26888 + if (!local_read(&tty->open_count)) {
26889 mutex_unlock(&tty->ipw_tty_mutex);
26890 return -EINVAL;
26891 }
26892 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
26893 if (!tty)
26894 return -ENODEV;
26895
26896 - if (!tty->open_count)
26897 + if (!local_read(&tty->open_count))
26898 return -EINVAL;
26899
26900 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
26901 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
26902 if (!tty)
26903 return 0;
26904
26905 - if (!tty->open_count)
26906 + if (!local_read(&tty->open_count))
26907 return 0;
26908
26909 return tty->tx_bytes_queued;
26910 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
26911 if (!tty)
26912 return -ENODEV;
26913
26914 - if (!tty->open_count)
26915 + if (!local_read(&tty->open_count))
26916 return -EINVAL;
26917
26918 return get_control_lines(tty);
26919 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
26920 if (!tty)
26921 return -ENODEV;
26922
26923 - if (!tty->open_count)
26924 + if (!local_read(&tty->open_count))
26925 return -EINVAL;
26926
26927 return set_control_lines(tty, set, clear);
26928 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
26929 if (!tty)
26930 return -ENODEV;
26931
26932 - if (!tty->open_count)
26933 + if (!local_read(&tty->open_count))
26934 return -EINVAL;
26935
26936 /* FIXME: Exactly how is the tty object locked here .. */
26937 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
26938 against a parallel ioctl etc */
26939 mutex_lock(&ttyj->ipw_tty_mutex);
26940 }
26941 - while (ttyj->open_count)
26942 + while (local_read(&ttyj->open_count))
26943 do_ipw_close(ttyj);
26944 ipwireless_disassociate_network_ttys(network,
26945 ttyj->channel_idx);
26946 diff -urNp linux-2.6.32.42/drivers/char/pty.c linux-2.6.32.42/drivers/char/pty.c
26947 --- linux-2.6.32.42/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
26948 +++ linux-2.6.32.42/drivers/char/pty.c 2011-04-17 15:56:46.000000000 -0400
26949 @@ -682,7 +682,18 @@ static int ptmx_open(struct inode *inode
26950 return ret;
26951 }
26952
26953 -static struct file_operations ptmx_fops;
26954 +static const struct file_operations ptmx_fops = {
26955 + .llseek = no_llseek,
26956 + .read = tty_read,
26957 + .write = tty_write,
26958 + .poll = tty_poll,
26959 + .unlocked_ioctl = tty_ioctl,
26960 + .compat_ioctl = tty_compat_ioctl,
26961 + .open = ptmx_open,
26962 + .release = tty_release,
26963 + .fasync = tty_fasync,
26964 +};
26965 +
26966
26967 static void __init unix98_pty_init(void)
26968 {
26969 @@ -736,9 +747,6 @@ static void __init unix98_pty_init(void)
26970 register_sysctl_table(pty_root_table);
26971
26972 /* Now create the /dev/ptmx special device */
26973 - tty_default_fops(&ptmx_fops);
26974 - ptmx_fops.open = ptmx_open;
26975 -
26976 cdev_init(&ptmx_cdev, &ptmx_fops);
26977 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
26978 register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
26979 diff -urNp linux-2.6.32.42/drivers/char/random.c linux-2.6.32.42/drivers/char/random.c
26980 --- linux-2.6.32.42/drivers/char/random.c 2011-03-27 14:31:47.000000000 -0400
26981 +++ linux-2.6.32.42/drivers/char/random.c 2011-04-17 15:56:46.000000000 -0400
26982 @@ -254,8 +254,13 @@
26983 /*
26984 * Configuration information
26985 */
26986 +#ifdef CONFIG_GRKERNSEC_RANDNET
26987 +#define INPUT_POOL_WORDS 512
26988 +#define OUTPUT_POOL_WORDS 128
26989 +#else
26990 #define INPUT_POOL_WORDS 128
26991 #define OUTPUT_POOL_WORDS 32
26992 +#endif
26993 #define SEC_XFER_SIZE 512
26994
26995 /*
26996 @@ -292,10 +297,17 @@ static struct poolinfo {
26997 int poolwords;
26998 int tap1, tap2, tap3, tap4, tap5;
26999 } poolinfo_table[] = {
27000 +#ifdef CONFIG_GRKERNSEC_RANDNET
27001 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27002 + { 512, 411, 308, 208, 104, 1 },
27003 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27004 + { 128, 103, 76, 51, 25, 1 },
27005 +#else
27006 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27007 { 128, 103, 76, 51, 25, 1 },
27008 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27009 { 32, 26, 20, 14, 7, 1 },
27010 +#endif
27011 #if 0
27012 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27013 { 2048, 1638, 1231, 819, 411, 1 },
27014 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27015 #include <linux/sysctl.h>
27016
27017 static int min_read_thresh = 8, min_write_thresh;
27018 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
27019 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27020 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27021 static char sysctl_bootid[16];
27022
27023 diff -urNp linux-2.6.32.42/drivers/char/rocket.c linux-2.6.32.42/drivers/char/rocket.c
27024 --- linux-2.6.32.42/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
27025 +++ linux-2.6.32.42/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
27026 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
27027 struct rocket_ports tmp;
27028 int board;
27029
27030 + pax_track_stack();
27031 +
27032 if (!retports)
27033 return -EFAULT;
27034 memset(&tmp, 0, sizeof (tmp));
27035 diff -urNp linux-2.6.32.42/drivers/char/sonypi.c linux-2.6.32.42/drivers/char/sonypi.c
27036 --- linux-2.6.32.42/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
27037 +++ linux-2.6.32.42/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
27038 @@ -55,6 +55,7 @@
27039 #include <asm/uaccess.h>
27040 #include <asm/io.h>
27041 #include <asm/system.h>
27042 +#include <asm/local.h>
27043
27044 #include <linux/sonypi.h>
27045
27046 @@ -491,7 +492,7 @@ static struct sonypi_device {
27047 spinlock_t fifo_lock;
27048 wait_queue_head_t fifo_proc_list;
27049 struct fasync_struct *fifo_async;
27050 - int open_count;
27051 + local_t open_count;
27052 int model;
27053 struct input_dev *input_jog_dev;
27054 struct input_dev *input_key_dev;
27055 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
27056 static int sonypi_misc_release(struct inode *inode, struct file *file)
27057 {
27058 mutex_lock(&sonypi_device.lock);
27059 - sonypi_device.open_count--;
27060 + local_dec(&sonypi_device.open_count);
27061 mutex_unlock(&sonypi_device.lock);
27062 return 0;
27063 }
27064 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
27065 lock_kernel();
27066 mutex_lock(&sonypi_device.lock);
27067 /* Flush input queue on first open */
27068 - if (!sonypi_device.open_count)
27069 + if (!local_read(&sonypi_device.open_count))
27070 kfifo_reset(sonypi_device.fifo);
27071 - sonypi_device.open_count++;
27072 + local_inc(&sonypi_device.open_count);
27073 mutex_unlock(&sonypi_device.lock);
27074 unlock_kernel();
27075 return 0;
27076 diff -urNp linux-2.6.32.42/drivers/char/stallion.c linux-2.6.32.42/drivers/char/stallion.c
27077 --- linux-2.6.32.42/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
27078 +++ linux-2.6.32.42/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
27079 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
27080 struct stlport stl_dummyport;
27081 struct stlport *portp;
27082
27083 + pax_track_stack();
27084 +
27085 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
27086 return -EFAULT;
27087 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
27088 diff -urNp linux-2.6.32.42/drivers/char/tpm/tpm_bios.c linux-2.6.32.42/drivers/char/tpm/tpm_bios.c
27089 --- linux-2.6.32.42/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
27090 +++ linux-2.6.32.42/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
27091 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
27092 event = addr;
27093
27094 if ((event->event_type == 0 && event->event_size == 0) ||
27095 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27096 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27097 return NULL;
27098
27099 return addr;
27100 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
27101 return NULL;
27102
27103 if ((event->event_type == 0 && event->event_size == 0) ||
27104 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27105 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27106 return NULL;
27107
27108 (*pos)++;
27109 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
27110 int i;
27111
27112 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27113 - seq_putc(m, data[i]);
27114 + if (!seq_putc(m, data[i]))
27115 + return -EFAULT;
27116
27117 return 0;
27118 }
27119 @@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
27120 log->bios_event_log_end = log->bios_event_log + len;
27121
27122 virt = acpi_os_map_memory(start, len);
27123 + if (!virt) {
27124 + kfree(log->bios_event_log);
27125 + log->bios_event_log = NULL;
27126 + return -EFAULT;
27127 + }
27128
27129 memcpy(log->bios_event_log, virt, len);
27130
27131 diff -urNp linux-2.6.32.42/drivers/char/tpm/tpm.c linux-2.6.32.42/drivers/char/tpm/tpm.c
27132 --- linux-2.6.32.42/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
27133 +++ linux-2.6.32.42/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
27134 @@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
27135 chip->vendor.req_complete_val)
27136 goto out_recv;
27137
27138 - if ((status == chip->vendor.req_canceled)) {
27139 + if (status == chip->vendor.req_canceled) {
27140 dev_err(chip->dev, "Operation Canceled\n");
27141 rc = -ECANCELED;
27142 goto out;
27143 @@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
27144
27145 struct tpm_chip *chip = dev_get_drvdata(dev);
27146
27147 + pax_track_stack();
27148 +
27149 tpm_cmd.header.in = tpm_readpubek_header;
27150 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
27151 "attempting to read the PUBEK");
27152 diff -urNp linux-2.6.32.42/drivers/char/tty_io.c linux-2.6.32.42/drivers/char/tty_io.c
27153 --- linux-2.6.32.42/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
27154 +++ linux-2.6.32.42/drivers/char/tty_io.c 2011-04-17 15:56:46.000000000 -0400
27155 @@ -136,21 +136,10 @@ LIST_HEAD(tty_drivers); /* linked list
27156 DEFINE_MUTEX(tty_mutex);
27157 EXPORT_SYMBOL(tty_mutex);
27158
27159 -static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
27160 -static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
27161 ssize_t redirected_tty_write(struct file *, const char __user *,
27162 size_t, loff_t *);
27163 -static unsigned int tty_poll(struct file *, poll_table *);
27164 static int tty_open(struct inode *, struct file *);
27165 -static int tty_release(struct inode *, struct file *);
27166 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
27167 -#ifdef CONFIG_COMPAT
27168 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27169 - unsigned long arg);
27170 -#else
27171 -#define tty_compat_ioctl NULL
27172 -#endif
27173 -static int tty_fasync(int fd, struct file *filp, int on);
27174 static void release_tty(struct tty_struct *tty, int idx);
27175 static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27176 static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27177 @@ -870,7 +859,7 @@ EXPORT_SYMBOL(start_tty);
27178 * read calls may be outstanding in parallel.
27179 */
27180
27181 -static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27182 +ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27183 loff_t *ppos)
27184 {
27185 int i;
27186 @@ -898,6 +887,8 @@ static ssize_t tty_read(struct file *fil
27187 return i;
27188 }
27189
27190 +EXPORT_SYMBOL(tty_read);
27191 +
27192 void tty_write_unlock(struct tty_struct *tty)
27193 {
27194 mutex_unlock(&tty->atomic_write_lock);
27195 @@ -1045,7 +1036,7 @@ void tty_write_message(struct tty_struct
27196 * write method will not be invoked in parallel for each device.
27197 */
27198
27199 -static ssize_t tty_write(struct file *file, const char __user *buf,
27200 +ssize_t tty_write(struct file *file, const char __user *buf,
27201 size_t count, loff_t *ppos)
27202 {
27203 struct tty_struct *tty;
27204 @@ -1072,6 +1063,8 @@ static ssize_t tty_write(struct file *fi
27205 return ret;
27206 }
27207
27208 +EXPORT_SYMBOL(tty_write);
27209 +
27210 ssize_t redirected_tty_write(struct file *file, const char __user *buf,
27211 size_t count, loff_t *ppos)
27212 {
27213 @@ -1867,7 +1860,7 @@ static int tty_open(struct inode *inode,
27214 * Takes bkl. See tty_release_dev
27215 */
27216
27217 -static int tty_release(struct inode *inode, struct file *filp)
27218 +int tty_release(struct inode *inode, struct file *filp)
27219 {
27220 lock_kernel();
27221 tty_release_dev(filp);
27222 @@ -1875,6 +1868,8 @@ static int tty_release(struct inode *ino
27223 return 0;
27224 }
27225
27226 +EXPORT_SYMBOL(tty_release);
27227 +
27228 /**
27229 * tty_poll - check tty status
27230 * @filp: file being polled
27231 @@ -1887,7 +1882,7 @@ static int tty_release(struct inode *ino
27232 * may be re-entered freely by other callers.
27233 */
27234
27235 -static unsigned int tty_poll(struct file *filp, poll_table *wait)
27236 +unsigned int tty_poll(struct file *filp, poll_table *wait)
27237 {
27238 struct tty_struct *tty;
27239 struct tty_ldisc *ld;
27240 @@ -1904,7 +1899,9 @@ static unsigned int tty_poll(struct file
27241 return ret;
27242 }
27243
27244 -static int tty_fasync(int fd, struct file *filp, int on)
27245 +EXPORT_SYMBOL(tty_poll);
27246 +
27247 +int tty_fasync(int fd, struct file *filp, int on)
27248 {
27249 struct tty_struct *tty;
27250 unsigned long flags;
27251 @@ -1948,6 +1945,8 @@ out:
27252 return retval;
27253 }
27254
27255 +EXPORT_SYMBOL(tty_fasync);
27256 +
27257 /**
27258 * tiocsti - fake input character
27259 * @tty: tty to fake input into
27260 @@ -2582,8 +2581,10 @@ long tty_ioctl(struct file *file, unsign
27261 return retval;
27262 }
27263
27264 +EXPORT_SYMBOL(tty_ioctl);
27265 +
27266 #ifdef CONFIG_COMPAT
27267 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27268 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
27269 unsigned long arg)
27270 {
27271 struct inode *inode = file->f_dentry->d_inode;
27272 @@ -2607,6 +2608,8 @@ static long tty_compat_ioctl(struct file
27273
27274 return retval;
27275 }
27276 +
27277 +EXPORT_SYMBOL(tty_compat_ioctl);
27278 #endif
27279
27280 /*
27281 @@ -3050,11 +3053,6 @@ struct tty_struct *get_current_tty(void)
27282 }
27283 EXPORT_SYMBOL_GPL(get_current_tty);
27284
27285 -void tty_default_fops(struct file_operations *fops)
27286 -{
27287 - *fops = tty_fops;
27288 -}
27289 -
27290 /*
27291 * Initialize the console device. This is called *early*, so
27292 * we can't necessarily depend on lots of kernel help here.
27293 diff -urNp linux-2.6.32.42/drivers/char/tty_ldisc.c linux-2.6.32.42/drivers/char/tty_ldisc.c
27294 --- linux-2.6.32.42/drivers/char/tty_ldisc.c 2011-03-27 14:31:47.000000000 -0400
27295 +++ linux-2.6.32.42/drivers/char/tty_ldisc.c 2011-04-17 15:56:46.000000000 -0400
27296 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
27297 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
27298 struct tty_ldisc_ops *ldo = ld->ops;
27299
27300 - ldo->refcount--;
27301 + atomic_dec(&ldo->refcount);
27302 module_put(ldo->owner);
27303 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27304
27305 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
27306 spin_lock_irqsave(&tty_ldisc_lock, flags);
27307 tty_ldiscs[disc] = new_ldisc;
27308 new_ldisc->num = disc;
27309 - new_ldisc->refcount = 0;
27310 + atomic_set(&new_ldisc->refcount, 0);
27311 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27312
27313 return ret;
27314 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
27315 return -EINVAL;
27316
27317 spin_lock_irqsave(&tty_ldisc_lock, flags);
27318 - if (tty_ldiscs[disc]->refcount)
27319 + if (atomic_read(&tty_ldiscs[disc]->refcount))
27320 ret = -EBUSY;
27321 else
27322 tty_ldiscs[disc] = NULL;
27323 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
27324 if (ldops) {
27325 ret = ERR_PTR(-EAGAIN);
27326 if (try_module_get(ldops->owner)) {
27327 - ldops->refcount++;
27328 + atomic_inc(&ldops->refcount);
27329 ret = ldops;
27330 }
27331 }
27332 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
27333 unsigned long flags;
27334
27335 spin_lock_irqsave(&tty_ldisc_lock, flags);
27336 - ldops->refcount--;
27337 + atomic_dec(&ldops->refcount);
27338 module_put(ldops->owner);
27339 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27340 }
27341 diff -urNp linux-2.6.32.42/drivers/char/virtio_console.c linux-2.6.32.42/drivers/char/virtio_console.c
27342 --- linux-2.6.32.42/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
27343 +++ linux-2.6.32.42/drivers/char/virtio_console.c 2011-04-17 15:56:46.000000000 -0400
27344 @@ -44,6 +44,7 @@ static unsigned int in_len;
27345 static char *in, *inbuf;
27346
27347 /* The operations for our console. */
27348 +/* cannot be const */
27349 static struct hv_ops virtio_cons;
27350
27351 /* The hvc device */
27352 diff -urNp linux-2.6.32.42/drivers/char/vt.c linux-2.6.32.42/drivers/char/vt.c
27353 --- linux-2.6.32.42/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
27354 +++ linux-2.6.32.42/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
27355 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
27356
27357 static void notify_write(struct vc_data *vc, unsigned int unicode)
27358 {
27359 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
27360 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
27361 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
27362 }
27363
27364 diff -urNp linux-2.6.32.42/drivers/char/vt_ioctl.c linux-2.6.32.42/drivers/char/vt_ioctl.c
27365 --- linux-2.6.32.42/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
27366 +++ linux-2.6.32.42/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
27367 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
27368 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
27369 return -EFAULT;
27370
27371 - if (!capable(CAP_SYS_TTY_CONFIG))
27372 - perm = 0;
27373 -
27374 switch (cmd) {
27375 case KDGKBENT:
27376 key_map = key_maps[s];
27377 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
27378 val = (i ? K_HOLE : K_NOSUCHMAP);
27379 return put_user(val, &user_kbe->kb_value);
27380 case KDSKBENT:
27381 + if (!capable(CAP_SYS_TTY_CONFIG))
27382 + perm = 0;
27383 +
27384 if (!perm)
27385 return -EPERM;
27386 +
27387 if (!i && v == K_NOSUCHMAP) {
27388 /* deallocate map */
27389 key_map = key_maps[s];
27390 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
27391 int i, j, k;
27392 int ret;
27393
27394 - if (!capable(CAP_SYS_TTY_CONFIG))
27395 - perm = 0;
27396 -
27397 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
27398 if (!kbs) {
27399 ret = -ENOMEM;
27400 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
27401 kfree(kbs);
27402 return ((p && *p) ? -EOVERFLOW : 0);
27403 case KDSKBSENT:
27404 + if (!capable(CAP_SYS_TTY_CONFIG))
27405 + perm = 0;
27406 +
27407 if (!perm) {
27408 ret = -EPERM;
27409 goto reterr;
27410 diff -urNp linux-2.6.32.42/drivers/cpufreq/cpufreq.c linux-2.6.32.42/drivers/cpufreq/cpufreq.c
27411 --- linux-2.6.32.42/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
27412 +++ linux-2.6.32.42/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
27413 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
27414 complete(&policy->kobj_unregister);
27415 }
27416
27417 -static struct sysfs_ops sysfs_ops = {
27418 +static const struct sysfs_ops sysfs_ops = {
27419 .show = show,
27420 .store = store,
27421 };
27422 diff -urNp linux-2.6.32.42/drivers/cpuidle/sysfs.c linux-2.6.32.42/drivers/cpuidle/sysfs.c
27423 --- linux-2.6.32.42/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
27424 +++ linux-2.6.32.42/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
27425 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
27426 return ret;
27427 }
27428
27429 -static struct sysfs_ops cpuidle_sysfs_ops = {
27430 +static const struct sysfs_ops cpuidle_sysfs_ops = {
27431 .show = cpuidle_show,
27432 .store = cpuidle_store,
27433 };
27434 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
27435 return ret;
27436 }
27437
27438 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
27439 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
27440 .show = cpuidle_state_show,
27441 };
27442
27443 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
27444 .release = cpuidle_state_sysfs_release,
27445 };
27446
27447 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
27448 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
27449 {
27450 kobject_put(&device->kobjs[i]->kobj);
27451 wait_for_completion(&device->kobjs[i]->kobj_unregister);
27452 diff -urNp linux-2.6.32.42/drivers/crypto/hifn_795x.c linux-2.6.32.42/drivers/crypto/hifn_795x.c
27453 --- linux-2.6.32.42/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
27454 +++ linux-2.6.32.42/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
27455 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
27456 0xCA, 0x34, 0x2B, 0x2E};
27457 struct scatterlist sg;
27458
27459 + pax_track_stack();
27460 +
27461 memset(src, 0, sizeof(src));
27462 memset(ctx.key, 0, sizeof(ctx.key));
27463
27464 diff -urNp linux-2.6.32.42/drivers/crypto/padlock-aes.c linux-2.6.32.42/drivers/crypto/padlock-aes.c
27465 --- linux-2.6.32.42/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
27466 +++ linux-2.6.32.42/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
27467 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
27468 struct crypto_aes_ctx gen_aes;
27469 int cpu;
27470
27471 + pax_track_stack();
27472 +
27473 if (key_len % 8) {
27474 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
27475 return -EINVAL;
27476 diff -urNp linux-2.6.32.42/drivers/dma/ioat/dma.c linux-2.6.32.42/drivers/dma/ioat/dma.c
27477 --- linux-2.6.32.42/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
27478 +++ linux-2.6.32.42/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
27479 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
27480 return entry->show(&chan->common, page);
27481 }
27482
27483 -struct sysfs_ops ioat_sysfs_ops = {
27484 +const struct sysfs_ops ioat_sysfs_ops = {
27485 .show = ioat_attr_show,
27486 };
27487
27488 diff -urNp linux-2.6.32.42/drivers/dma/ioat/dma.h linux-2.6.32.42/drivers/dma/ioat/dma.h
27489 --- linux-2.6.32.42/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
27490 +++ linux-2.6.32.42/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
27491 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
27492 unsigned long *phys_complete);
27493 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
27494 void ioat_kobject_del(struct ioatdma_device *device);
27495 -extern struct sysfs_ops ioat_sysfs_ops;
27496 +extern const struct sysfs_ops ioat_sysfs_ops;
27497 extern struct ioat_sysfs_entry ioat_version_attr;
27498 extern struct ioat_sysfs_entry ioat_cap_attr;
27499 #endif /* IOATDMA_H */
27500 diff -urNp linux-2.6.32.42/drivers/edac/edac_device_sysfs.c linux-2.6.32.42/drivers/edac/edac_device_sysfs.c
27501 --- linux-2.6.32.42/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27502 +++ linux-2.6.32.42/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
27503 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
27504 }
27505
27506 /* edac_dev file operations for an 'ctl_info' */
27507 -static struct sysfs_ops device_ctl_info_ops = {
27508 +static const struct sysfs_ops device_ctl_info_ops = {
27509 .show = edac_dev_ctl_info_show,
27510 .store = edac_dev_ctl_info_store
27511 };
27512 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
27513 }
27514
27515 /* edac_dev file operations for an 'instance' */
27516 -static struct sysfs_ops device_instance_ops = {
27517 +static const struct sysfs_ops device_instance_ops = {
27518 .show = edac_dev_instance_show,
27519 .store = edac_dev_instance_store
27520 };
27521 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
27522 }
27523
27524 /* edac_dev file operations for a 'block' */
27525 -static struct sysfs_ops device_block_ops = {
27526 +static const struct sysfs_ops device_block_ops = {
27527 .show = edac_dev_block_show,
27528 .store = edac_dev_block_store
27529 };
27530 diff -urNp linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c
27531 --- linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27532 +++ linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
27533 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
27534 return -EIO;
27535 }
27536
27537 -static struct sysfs_ops csrowfs_ops = {
27538 +static const struct sysfs_ops csrowfs_ops = {
27539 .show = csrowdev_show,
27540 .store = csrowdev_store
27541 };
27542 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
27543 }
27544
27545 /* Intermediate show/store table */
27546 -static struct sysfs_ops mci_ops = {
27547 +static const struct sysfs_ops mci_ops = {
27548 .show = mcidev_show,
27549 .store = mcidev_store
27550 };
27551 diff -urNp linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c
27552 --- linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27553 +++ linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
27554 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
27555 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27556 static int edac_pci_poll_msec = 1000; /* one second workq period */
27557
27558 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
27559 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27560 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27561 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27562
27563 static struct kobject *edac_pci_top_main_kobj;
27564 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27565 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
27566 }
27567
27568 /* fs_ops table */
27569 -static struct sysfs_ops pci_instance_ops = {
27570 +static const struct sysfs_ops pci_instance_ops = {
27571 .show = edac_pci_instance_show,
27572 .store = edac_pci_instance_store
27573 };
27574 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
27575 return -EIO;
27576 }
27577
27578 -static struct sysfs_ops edac_pci_sysfs_ops = {
27579 +static const struct sysfs_ops edac_pci_sysfs_ops = {
27580 .show = edac_pci_dev_show,
27581 .store = edac_pci_dev_store
27582 };
27583 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
27584 edac_printk(KERN_CRIT, EDAC_PCI,
27585 "Signaled System Error on %s\n",
27586 pci_name(dev));
27587 - atomic_inc(&pci_nonparity_count);
27588 + atomic_inc_unchecked(&pci_nonparity_count);
27589 }
27590
27591 if (status & (PCI_STATUS_PARITY)) {
27592 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
27593 "Master Data Parity Error on %s\n",
27594 pci_name(dev));
27595
27596 - atomic_inc(&pci_parity_count);
27597 + atomic_inc_unchecked(&pci_parity_count);
27598 }
27599
27600 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27601 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
27602 "Detected Parity Error on %s\n",
27603 pci_name(dev));
27604
27605 - atomic_inc(&pci_parity_count);
27606 + atomic_inc_unchecked(&pci_parity_count);
27607 }
27608 }
27609
27610 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
27611 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27612 "Signaled System Error on %s\n",
27613 pci_name(dev));
27614 - atomic_inc(&pci_nonparity_count);
27615 + atomic_inc_unchecked(&pci_nonparity_count);
27616 }
27617
27618 if (status & (PCI_STATUS_PARITY)) {
27619 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
27620 "Master Data Parity Error on "
27621 "%s\n", pci_name(dev));
27622
27623 - atomic_inc(&pci_parity_count);
27624 + atomic_inc_unchecked(&pci_parity_count);
27625 }
27626
27627 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27628 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
27629 "Detected Parity Error on %s\n",
27630 pci_name(dev));
27631
27632 - atomic_inc(&pci_parity_count);
27633 + atomic_inc_unchecked(&pci_parity_count);
27634 }
27635 }
27636 }
27637 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
27638 if (!check_pci_errors)
27639 return;
27640
27641 - before_count = atomic_read(&pci_parity_count);
27642 + before_count = atomic_read_unchecked(&pci_parity_count);
27643
27644 /* scan all PCI devices looking for a Parity Error on devices and
27645 * bridges.
27646 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
27647 /* Only if operator has selected panic on PCI Error */
27648 if (edac_pci_get_panic_on_pe()) {
27649 /* If the count is different 'after' from 'before' */
27650 - if (before_count != atomic_read(&pci_parity_count))
27651 + if (before_count != atomic_read_unchecked(&pci_parity_count))
27652 panic("EDAC: PCI Parity Error");
27653 }
27654 }
27655 diff -urNp linux-2.6.32.42/drivers/firewire/core-cdev.c linux-2.6.32.42/drivers/firewire/core-cdev.c
27656 --- linux-2.6.32.42/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
27657 +++ linux-2.6.32.42/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
27658 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
27659 int ret;
27660
27661 if ((request->channels == 0 && request->bandwidth == 0) ||
27662 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27663 - request->bandwidth < 0)
27664 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27665 return -EINVAL;
27666
27667 r = kmalloc(sizeof(*r), GFP_KERNEL);
27668 diff -urNp linux-2.6.32.42/drivers/firewire/core-transaction.c linux-2.6.32.42/drivers/firewire/core-transaction.c
27669 --- linux-2.6.32.42/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
27670 +++ linux-2.6.32.42/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
27671 @@ -36,6 +36,7 @@
27672 #include <linux/string.h>
27673 #include <linux/timer.h>
27674 #include <linux/types.h>
27675 +#include <linux/sched.h>
27676
27677 #include <asm/byteorder.h>
27678
27679 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
27680 struct transaction_callback_data d;
27681 struct fw_transaction t;
27682
27683 + pax_track_stack();
27684 +
27685 init_completion(&d.done);
27686 d.payload = payload;
27687 fw_send_request(card, &t, tcode, destination_id, generation, speed,
27688 diff -urNp linux-2.6.32.42/drivers/firmware/dmi_scan.c linux-2.6.32.42/drivers/firmware/dmi_scan.c
27689 --- linux-2.6.32.42/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
27690 +++ linux-2.6.32.42/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
27691 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
27692 }
27693 }
27694 else {
27695 - /*
27696 - * no iounmap() for that ioremap(); it would be a no-op, but
27697 - * it's so early in setup that sucker gets confused into doing
27698 - * what it shouldn't if we actually call it.
27699 - */
27700 p = dmi_ioremap(0xF0000, 0x10000);
27701 if (p == NULL)
27702 goto error;
27703 diff -urNp linux-2.6.32.42/drivers/firmware/edd.c linux-2.6.32.42/drivers/firmware/edd.c
27704 --- linux-2.6.32.42/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
27705 +++ linux-2.6.32.42/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
27706 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
27707 return ret;
27708 }
27709
27710 -static struct sysfs_ops edd_attr_ops = {
27711 +static const struct sysfs_ops edd_attr_ops = {
27712 .show = edd_attr_show,
27713 };
27714
27715 diff -urNp linux-2.6.32.42/drivers/firmware/efivars.c linux-2.6.32.42/drivers/firmware/efivars.c
27716 --- linux-2.6.32.42/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
27717 +++ linux-2.6.32.42/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
27718 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
27719 return ret;
27720 }
27721
27722 -static struct sysfs_ops efivar_attr_ops = {
27723 +static const struct sysfs_ops efivar_attr_ops = {
27724 .show = efivar_attr_show,
27725 .store = efivar_attr_store,
27726 };
27727 diff -urNp linux-2.6.32.42/drivers/firmware/iscsi_ibft.c linux-2.6.32.42/drivers/firmware/iscsi_ibft.c
27728 --- linux-2.6.32.42/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
27729 +++ linux-2.6.32.42/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
27730 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
27731 return ret;
27732 }
27733
27734 -static struct sysfs_ops ibft_attr_ops = {
27735 +static const struct sysfs_ops ibft_attr_ops = {
27736 .show = ibft_show_attribute,
27737 };
27738
27739 diff -urNp linux-2.6.32.42/drivers/firmware/memmap.c linux-2.6.32.42/drivers/firmware/memmap.c
27740 --- linux-2.6.32.42/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
27741 +++ linux-2.6.32.42/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
27742 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
27743 NULL
27744 };
27745
27746 -static struct sysfs_ops memmap_attr_ops = {
27747 +static const struct sysfs_ops memmap_attr_ops = {
27748 .show = memmap_attr_show,
27749 };
27750
27751 diff -urNp linux-2.6.32.42/drivers/gpio/vr41xx_giu.c linux-2.6.32.42/drivers/gpio/vr41xx_giu.c
27752 --- linux-2.6.32.42/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
27753 +++ linux-2.6.32.42/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
27754 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27755 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27756 maskl, pendl, maskh, pendh);
27757
27758 - atomic_inc(&irq_err_count);
27759 + atomic_inc_unchecked(&irq_err_count);
27760
27761 return -EINVAL;
27762 }
27763 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c
27764 --- linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
27765 +++ linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
27766 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
27767 struct drm_crtc *tmp;
27768 int crtc_mask = 1;
27769
27770 - WARN(!crtc, "checking null crtc?");
27771 + BUG_ON(!crtc);
27772
27773 dev = crtc->dev;
27774
27775 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
27776
27777 adjusted_mode = drm_mode_duplicate(dev, mode);
27778
27779 + pax_track_stack();
27780 +
27781 crtc->enabled = drm_helper_crtc_in_use(crtc);
27782
27783 if (!crtc->enabled)
27784 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_drv.c linux-2.6.32.42/drivers/gpu/drm/drm_drv.c
27785 --- linux-2.6.32.42/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
27786 +++ linux-2.6.32.42/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
27787 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
27788 char *kdata = NULL;
27789
27790 atomic_inc(&dev->ioctl_count);
27791 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27792 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27793 ++file_priv->ioctl_count;
27794
27795 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27796 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_fops.c linux-2.6.32.42/drivers/gpu/drm/drm_fops.c
27797 --- linux-2.6.32.42/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
27798 +++ linux-2.6.32.42/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
27799 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
27800 }
27801
27802 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27803 - atomic_set(&dev->counts[i], 0);
27804 + atomic_set_unchecked(&dev->counts[i], 0);
27805
27806 dev->sigdata.lock = NULL;
27807
27808 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
27809
27810 retcode = drm_open_helper(inode, filp, dev);
27811 if (!retcode) {
27812 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27813 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27814 spin_lock(&dev->count_lock);
27815 - if (!dev->open_count++) {
27816 + if (local_inc_return(&dev->open_count) == 1) {
27817 spin_unlock(&dev->count_lock);
27818 retcode = drm_setup(dev);
27819 goto out;
27820 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
27821
27822 lock_kernel();
27823
27824 - DRM_DEBUG("open_count = %d\n", dev->open_count);
27825 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27826
27827 if (dev->driver->preclose)
27828 dev->driver->preclose(dev, file_priv);
27829 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
27830 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27831 task_pid_nr(current),
27832 (long)old_encode_dev(file_priv->minor->device),
27833 - dev->open_count);
27834 + local_read(&dev->open_count));
27835
27836 /* if the master has gone away we can't do anything with the lock */
27837 if (file_priv->minor->master)
27838 @@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
27839 * End inline drm_release
27840 */
27841
27842 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27843 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27844 spin_lock(&dev->count_lock);
27845 - if (!--dev->open_count) {
27846 + if (local_dec_and_test(&dev->open_count)) {
27847 if (atomic_read(&dev->ioctl_count)) {
27848 DRM_ERROR("Device busy: %d\n",
27849 atomic_read(&dev->ioctl_count));
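The drm_fops.c hunks above illustrate the counter split this patch applies across the DRM core: dev->counts[] only feeds statistics back to userspace, so it moves to the atomic_unchecked_t API (defined elsewhere in this patch as an overflow-check-exempt twin of atomic_t), while dev->open_count becomes a local_t driven by local_inc_return()/local_dec_and_test() under the existing count_lock. A minimal sketch of the split, with hypothetical names and assuming the _unchecked primitives introduced by this patch:

	#include <asm/atomic.h>

	/* Lifetime-critical counts stay atomic_t so the PaX REFCOUNT overflow
	 * detection covers them; purely informational counters use the
	 * overflow-exempt atomic_unchecked_t added by this patch. */
	struct example_dev {
		atomic_t		refcount;	/* guards object lifetime */
		atomic_unchecked_t	ioctl_stat;	/* statistics only */
	};

	static void example_ioctl_enter(struct example_dev *d)
	{
		atomic_inc(&d->refcount);		/* a wrap here would be a bug */
		atomic_inc_unchecked(&d->ioctl_stat);	/* a wrap here is harmless */
	}

The point of the distinction is that trapping or saturating on overflow is only wanted where a wrapped count could cause a premature free; statistics may wrap freely.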
27850 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_gem.c linux-2.6.32.42/drivers/gpu/drm/drm_gem.c
27851 --- linux-2.6.32.42/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
27852 +++ linux-2.6.32.42/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
27853 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
27854 spin_lock_init(&dev->object_name_lock);
27855 idr_init(&dev->object_name_idr);
27856 atomic_set(&dev->object_count, 0);
27857 - atomic_set(&dev->object_memory, 0);
27858 + atomic_set_unchecked(&dev->object_memory, 0);
27859 atomic_set(&dev->pin_count, 0);
27860 - atomic_set(&dev->pin_memory, 0);
27861 + atomic_set_unchecked(&dev->pin_memory, 0);
27862 atomic_set(&dev->gtt_count, 0);
27863 - atomic_set(&dev->gtt_memory, 0);
27864 + atomic_set_unchecked(&dev->gtt_memory, 0);
27865
27866 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
27867 if (!mm) {
27868 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
27869 goto fput;
27870 }
27871 atomic_inc(&dev->object_count);
27872 - atomic_add(obj->size, &dev->object_memory);
27873 + atomic_add_unchecked(obj->size, &dev->object_memory);
27874 return obj;
27875 fput:
27876 fput(obj->filp);
27877 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
27878
27879 fput(obj->filp);
27880 atomic_dec(&dev->object_count);
27881 - atomic_sub(obj->size, &dev->object_memory);
27882 + atomic_sub_unchecked(obj->size, &dev->object_memory);
27883 kfree(obj);
27884 }
27885 EXPORT_SYMBOL(drm_gem_object_free);
27886 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_info.c linux-2.6.32.42/drivers/gpu/drm/drm_info.c
27887 --- linux-2.6.32.42/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
27888 +++ linux-2.6.32.42/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
27889 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
27890 struct drm_local_map *map;
27891 struct drm_map_list *r_list;
27892
27893 - /* Hardcoded from _DRM_FRAME_BUFFER,
27894 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
27895 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
27896 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
27897 + static const char * const types[] = {
27898 + [_DRM_FRAME_BUFFER] = "FB",
27899 + [_DRM_REGISTERS] = "REG",
27900 + [_DRM_SHM] = "SHM",
27901 + [_DRM_AGP] = "AGP",
27902 + [_DRM_SCATTER_GATHER] = "SG",
27903 + [_DRM_CONSISTENT] = "PCI",
27904 + [_DRM_GEM] = "GEM" };
27905 const char *type;
27906 int i;
27907
27908 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
27909 map = r_list->map;
27910 if (!map)
27911 continue;
27912 - if (map->type < 0 || map->type > 5)
27913 + if (map->type >= ARRAY_SIZE(types))
27914 type = "??";
27915 else
27916 type = types[map->type];
27917 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
27918 struct drm_device *dev = node->minor->dev;
27919
27920 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
27921 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
27922 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
27923 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
27924 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
27925 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
27926 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
27927 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
27928 seq_printf(m, "%d gtt total\n", dev->gtt_total);
27929 return 0;
27930 }
27931 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
27932 mutex_lock(&dev->struct_mutex);
27933 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
27934 atomic_read(&dev->vma_count),
27935 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27936 + NULL, 0);
27937 +#else
27938 high_memory, (u64)virt_to_phys(high_memory));
27939 +#endif
27940
27941 list_for_each_entry(pt, &dev->vmalist, head) {
27942 vma = pt->vma;
27943 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
27944 continue;
27945 seq_printf(m,
27946 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
27947 - pt->pid, vma->vm_start, vma->vm_end,
27948 + pt->pid,
27949 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27950 + 0, 0,
27951 +#else
27952 + vma->vm_start, vma->vm_end,
27953 +#endif
27954 vma->vm_flags & VM_READ ? 'r' : '-',
27955 vma->vm_flags & VM_WRITE ? 'w' : '-',
27956 vma->vm_flags & VM_EXEC ? 'x' : '-',
27957 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
27958 vma->vm_flags & VM_LOCKED ? 'l' : '-',
27959 vma->vm_flags & VM_IO ? 'i' : '-',
27960 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27961 + 0);
27962 +#else
27963 vma->vm_pgoff);
27964 +#endif
27965
27966 #if defined(__i386__)
27967 pgprot = pgprot_val(vma->vm_page_prot);
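The first drm_info.c hunk above swaps a positional string table for designated initializers keyed by the map-type constants and bounds the lookup with ARRAY_SIZE() instead of a hard-coded "> 5", so adding a map type can no longer silently index past the table. A small, self-contained sketch of the same idiom (the enum and names here are illustrative, not the DRM ones):

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_AGP, MAP_SG, MAP_PCI, MAP_GEM };

	/* Designated initializers keep each name tied to its enum value. */
	static const char * const type_names[] = {
		[MAP_FB]  = "FB",
		[MAP_REG] = "REG",
		[MAP_SHM] = "SHM",
		[MAP_AGP] = "AGP",
		[MAP_SG]  = "SG",
		[MAP_PCI] = "PCI",
		[MAP_GEM] = "GEM",
	};

	static const char *type_name(unsigned int t)
	{
		/* Bound the lookup by the table itself, not a magic constant. */
		return t < ARRAY_SIZE(type_names) ? type_names[t] : "??";
	}

	int main(void)
	{
		printf("%s %s\n", type_name(MAP_AGP), type_name(42));
		return 0;
	}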
27968 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c
27969 --- linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
27970 +++ linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
27971 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
27972 stats->data[i].value =
27973 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
27974 else
27975 - stats->data[i].value = atomic_read(&dev->counts[i]);
27976 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
27977 stats->data[i].type = dev->types[i];
27978 }
27979
27980 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_lock.c linux-2.6.32.42/drivers/gpu/drm/drm_lock.c
27981 --- linux-2.6.32.42/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
27982 +++ linux-2.6.32.42/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
27983 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
27984 if (drm_lock_take(&master->lock, lock->context)) {
27985 master->lock.file_priv = file_priv;
27986 master->lock.lock_time = jiffies;
27987 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
27988 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
27989 break; /* Got lock */
27990 }
27991
27992 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
27993 return -EINVAL;
27994 }
27995
27996 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
27997 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
27998
27999 /* kernel_context_switch isn't used by any of the x86 drm
28000 * modules but is required by the Sparc driver.
28001 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c
28002 --- linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
28003 +++ linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
28004 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
28005 dma->buflist[vertex->idx],
28006 vertex->discard, vertex->used);
28007
28008 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28009 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28010 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28011 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28012 sarea_priv->last_enqueue = dev_priv->counter - 1;
28013 sarea_priv->last_dispatch = (int)hw_status[5];
28014
28015 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
28016 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28017 mc->last_render);
28018
28019 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28020 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28021 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28022 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28023 sarea_priv->last_enqueue = dev_priv->counter - 1;
28024 sarea_priv->last_dispatch = (int)hw_status[5];
28025
28026 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h
28027 --- linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
28028 +++ linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
28029 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28030 int page_flipping;
28031
28032 wait_queue_head_t irq_queue;
28033 - atomic_t irq_received;
28034 - atomic_t irq_emitted;
28035 + atomic_unchecked_t irq_received;
28036 + atomic_unchecked_t irq_emitted;
28037
28038 int front_offset;
28039 } drm_i810_private_t;
28040 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h
28041 --- linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
28042 +++ linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
28043 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
28044 int page_flipping;
28045
28046 wait_queue_head_t irq_queue;
28047 - atomic_t irq_received;
28048 - atomic_t irq_emitted;
28049 + atomic_unchecked_t irq_received;
28050 + atomic_unchecked_t irq_emitted;
28051
28052 int use_mi_batchbuffer_start;
28053
28054 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c
28055 --- linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
28056 +++ linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
28057 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
28058
28059 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
28060
28061 - atomic_inc(&dev_priv->irq_received);
28062 + atomic_inc_unchecked(&dev_priv->irq_received);
28063 wake_up_interruptible(&dev_priv->irq_queue);
28064
28065 return IRQ_HANDLED;
28066 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
28067
28068 DRM_DEBUG("%s\n", __func__);
28069
28070 - atomic_inc(&dev_priv->irq_emitted);
28071 + atomic_inc_unchecked(&dev_priv->irq_emitted);
28072
28073 BEGIN_LP_RING(2);
28074 OUT_RING(0);
28075 OUT_RING(GFX_OP_USER_INTERRUPT);
28076 ADVANCE_LP_RING();
28077
28078 - return atomic_read(&dev_priv->irq_emitted);
28079 + return atomic_read_unchecked(&dev_priv->irq_emitted);
28080 }
28081
28082 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
28083 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
28084
28085 DRM_DEBUG("%s\n", __func__);
28086
28087 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
28088 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
28089 return 0;
28090
28091 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
28092 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
28093
28094 for (;;) {
28095 __set_current_state(TASK_INTERRUPTIBLE);
28096 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
28097 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
28098 break;
28099 if ((signed)(end - jiffies) <= 0) {
28100 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
28101 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
28102 I830_WRITE16(I830REG_HWSTAM, 0xffff);
28103 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
28104 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
28105 - atomic_set(&dev_priv->irq_received, 0);
28106 - atomic_set(&dev_priv->irq_emitted, 0);
28107 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28108 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
28109 init_waitqueue_head(&dev_priv->irq_queue);
28110 }
28111
28112 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c
28113 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
28114 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
28115 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
28116 }
28117 }
28118
28119 -struct intel_dvo_dev_ops ch7017_ops = {
28120 +const struct intel_dvo_dev_ops ch7017_ops = {
28121 .init = ch7017_init,
28122 .detect = ch7017_detect,
28123 .mode_valid = ch7017_mode_valid,
28124 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c
28125 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
28126 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
28127 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
28128 }
28129 }
28130
28131 -struct intel_dvo_dev_ops ch7xxx_ops = {
28132 +const struct intel_dvo_dev_ops ch7xxx_ops = {
28133 .init = ch7xxx_init,
28134 .detect = ch7xxx_detect,
28135 .mode_valid = ch7xxx_mode_valid,
28136 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h
28137 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
28138 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
28139 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
28140 *
28141 * \return singly-linked list of modes or NULL if no modes found.
28142 */
28143 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
28144 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
28145
28146 /**
28147 * Clean up driver-specific bits of the output
28148 */
28149 - void (*destroy) (struct intel_dvo_device *dvo);
28150 + void (* const destroy) (struct intel_dvo_device *dvo);
28151
28152 /**
28153 * Debugging hook to dump device registers to log file
28154 */
28155 - void (*dump_regs)(struct intel_dvo_device *dvo);
28156 + void (* const dump_regs)(struct intel_dvo_device *dvo);
28157 };
28158
28159 -extern struct intel_dvo_dev_ops sil164_ops;
28160 -extern struct intel_dvo_dev_ops ch7xxx_ops;
28161 -extern struct intel_dvo_dev_ops ivch_ops;
28162 -extern struct intel_dvo_dev_ops tfp410_ops;
28163 -extern struct intel_dvo_dev_ops ch7017_ops;
28164 +extern const struct intel_dvo_dev_ops sil164_ops;
28165 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
28166 +extern const struct intel_dvo_dev_ops ivch_ops;
28167 +extern const struct intel_dvo_dev_ops tfp410_ops;
28168 +extern const struct intel_dvo_dev_ops ch7017_ops;
28169
28170 #endif /* _INTEL_DVO_H */
28171 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c
28172 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
28173 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
28174 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
28175 }
28176 }
28177
28178 -struct intel_dvo_dev_ops ivch_ops= {
28179 +const struct intel_dvo_dev_ops ivch_ops= {
28180 .init = ivch_init,
28181 .dpms = ivch_dpms,
28182 .save = ivch_save,
28183 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c
28184 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
28185 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
28186 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
28187 }
28188 }
28189
28190 -struct intel_dvo_dev_ops sil164_ops = {
28191 +const struct intel_dvo_dev_ops sil164_ops = {
28192 .init = sil164_init,
28193 .detect = sil164_detect,
28194 .mode_valid = sil164_mode_valid,
28195 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c
28196 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
28197 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
28198 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
28199 }
28200 }
28201
28202 -struct intel_dvo_dev_ops tfp410_ops = {
28203 +const struct intel_dvo_dev_ops tfp410_ops = {
28204 .init = tfp410_init,
28205 .detect = tfp410_detect,
28206 .mode_valid = tfp410_mode_valid,
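The dvo.h and dvo_*.c hunks above constify the intel_dvo_dev_ops dispatch tables: the struct members become const function pointers and each ops instance is declared const, so the tables typically land in read-only data and a stray (or attacker-controlled) write can no longer redirect them. A stripped-down sketch of a read-only ops table, with hypothetical names:

	#include <stdio.h>

	struct demo_ops {
		void (*init)(void);
		int  (*detect)(void);
	};

	static void demo_init(void)   { puts("init"); }
	static int  demo_detect(void) { return 1; }

	/* const instance: pointers fixed at build time, usually in .rodata. */
	static const struct demo_ops demo_ops = {
		.init   = demo_init,
		.detect = demo_detect,
	};

	int main(void)
	{
		demo_ops.init();
		return !demo_ops.detect();
	}

The same constification is applied to the vm_operations_struct and sysfs_ops instances later in this section (i915_drv.c, ttm_bo.c, ttm_memory.c).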
28207 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c
28208 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
28209 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
28210 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
28211 I915_READ(GTIMR));
28212 }
28213 seq_printf(m, "Interrupts received: %d\n",
28214 - atomic_read(&dev_priv->irq_received));
28215 + atomic_read_unchecked(&dev_priv->irq_received));
28216 if (dev_priv->hw_status_page != NULL) {
28217 seq_printf(m, "Current sequence: %d\n",
28218 i915_get_gem_seqno(dev));
28219 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c
28220 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
28221 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
28222 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
28223 return i915_resume(dev);
28224 }
28225
28226 -static struct vm_operations_struct i915_gem_vm_ops = {
28227 +static const struct vm_operations_struct i915_gem_vm_ops = {
28228 .fault = i915_gem_fault,
28229 .open = drm_gem_vm_open,
28230 .close = drm_gem_vm_close,
28231 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h
28232 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
28233 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h 2011-05-04 17:56:28.000000000 -0400
28234 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
28235 int page_flipping;
28236
28237 wait_queue_head_t irq_queue;
28238 - atomic_t irq_received;
28239 + atomic_unchecked_t irq_received;
28240 /** Protects user_irq_refcount and irq_mask_reg */
28241 spinlock_t user_irq_lock;
28242 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
28243 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c
28244 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
28245 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
28246 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
28247
28248 args->aper_size = dev->gtt_total;
28249 args->aper_available_size = (args->aper_size -
28250 - atomic_read(&dev->pin_memory));
28251 + atomic_read_unchecked(&dev->pin_memory));
28252
28253 return 0;
28254 }
28255 @@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
28256 return -EINVAL;
28257 }
28258
28259 + if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28260 + drm_gem_object_unreference(obj);
28261 + return -EFAULT;
28262 + }
28263 +
28264 if (i915_gem_object_needs_bit17_swizzle(obj)) {
28265 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
28266 } else {
28267 @@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
28268 return -EINVAL;
28269 }
28270
28271 + if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28272 + drm_gem_object_unreference(obj);
28273 + return -EFAULT;
28274 + }
28275 +
28276 /* We can only do the GTT pwrite on untiled buffers, as otherwise
28277 * it would end up going through the fenced access, and we'll get
28278 * different detiling behavior between reading and writing.
28279 @@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
28280
28281 if (obj_priv->gtt_space) {
28282 atomic_dec(&dev->gtt_count);
28283 - atomic_sub(obj->size, &dev->gtt_memory);
28284 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
28285
28286 drm_mm_put_block(obj_priv->gtt_space);
28287 obj_priv->gtt_space = NULL;
28288 @@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
28289 goto search_free;
28290 }
28291 atomic_inc(&dev->gtt_count);
28292 - atomic_add(obj->size, &dev->gtt_memory);
28293 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
28294
28295 /* Assert that the object is not currently in any GPU domain. As it
28296 * wasn't in the GTT, there shouldn't be any way it could have been in
28297 @@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
28298 "%d/%d gtt bytes\n",
28299 atomic_read(&dev->object_count),
28300 atomic_read(&dev->pin_count),
28301 - atomic_read(&dev->object_memory),
28302 - atomic_read(&dev->pin_memory),
28303 - atomic_read(&dev->gtt_memory),
28304 + atomic_read_unchecked(&dev->object_memory),
28305 + atomic_read_unchecked(&dev->pin_memory),
28306 + atomic_read_unchecked(&dev->gtt_memory),
28307 dev->gtt_total);
28308 }
28309 goto err;
28310 @@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
28311 */
28312 if (obj_priv->pin_count == 1) {
28313 atomic_inc(&dev->pin_count);
28314 - atomic_add(obj->size, &dev->pin_memory);
28315 + atomic_add_unchecked(obj->size, &dev->pin_memory);
28316 if (!obj_priv->active &&
28317 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
28318 !list_empty(&obj_priv->list))
28319 @@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
28320 list_move_tail(&obj_priv->list,
28321 &dev_priv->mm.inactive_list);
28322 atomic_dec(&dev->pin_count);
28323 - atomic_sub(obj->size, &dev->pin_memory);
28324 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
28325 }
28326 i915_verify_inactive(dev, __FILE__, __LINE__);
28327 }
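The i915_gem_pread_ioctl/i915_gem_pwrite_ioctl hunks above add an explicit access_ok() check on the user-supplied pointer and length before the object is used, failing early with -EFAULT rather than leaving validation to later copy paths. A minimal sketch of that guard, using a hypothetical helper and the 2.6.32-era access_ok() signature (which still takes a VERIFY_* argument):

	#include <linux/errno.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	static int example_copy_in(void *dst, const void __user *src, size_t len)
	{
		/* Reject bogus user ranges up front. */
		if (!access_ok(VERIFY_READ, src, len))
			return -EFAULT;

		/* copy_from_user() returns the number of bytes NOT copied. */
		if (copy_from_user(dst, src, len))
			return -EFAULT;

		return 0;
	}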
28328 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c
28329 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
28330 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
28331 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
28332 int irq_received;
28333 int ret = IRQ_NONE;
28334
28335 - atomic_inc(&dev_priv->irq_received);
28336 + atomic_inc_unchecked(&dev_priv->irq_received);
28337
28338 if (IS_IGDNG(dev))
28339 return igdng_irq_handler(dev);
28340 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
28341 {
28342 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28343
28344 - atomic_set(&dev_priv->irq_received, 0);
28345 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28346
28347 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28348 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28349 diff -urNp linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h
28350 --- linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
28351 +++ linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
28352 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28353 u32 clear_cmd;
28354 u32 maccess;
28355
28356 - atomic_t vbl_received; /**< Number of vblanks received. */
28357 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28358 wait_queue_head_t fence_queue;
28359 - atomic_t last_fence_retired;
28360 + atomic_unchecked_t last_fence_retired;
28361 u32 next_fence_to_post;
28362
28363 unsigned int fb_cpp;
28364 diff -urNp linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c
28365 --- linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
28366 +++ linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
28367 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
28368 if (crtc != 0)
28369 return 0;
28370
28371 - return atomic_read(&dev_priv->vbl_received);
28372 + return atomic_read_unchecked(&dev_priv->vbl_received);
28373 }
28374
28375
28376 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
28377 /* VBLANK interrupt */
28378 if (status & MGA_VLINEPEN) {
28379 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28380 - atomic_inc(&dev_priv->vbl_received);
28381 + atomic_inc_unchecked(&dev_priv->vbl_received);
28382 drm_handle_vblank(dev, 0);
28383 handled = 1;
28384 }
28385 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
28386 MGA_WRITE(MGA_PRIMEND, prim_end);
28387 }
28388
28389 - atomic_inc(&dev_priv->last_fence_retired);
28390 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
28391 DRM_WAKEUP(&dev_priv->fence_queue);
28392 handled = 1;
28393 }
28394 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
28395 * using fences.
28396 */
28397 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28398 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28399 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28400 - *sequence) <= (1 << 23)));
28401
28402 *sequence = cur_fence;
28403 diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c
28404 --- linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
28405 +++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
28406 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
28407
28408 /* GH: Simple idle check.
28409 */
28410 - atomic_set(&dev_priv->idle_count, 0);
28411 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28412
28413 /* We don't support anything other than bus-mastering ring mode,
28414 * but the ring can be in either AGP or PCI space for the ring
28415 diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h
28416 --- linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
28417 +++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
28418 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28419 int is_pci;
28420 unsigned long cce_buffers_offset;
28421
28422 - atomic_t idle_count;
28423 + atomic_unchecked_t idle_count;
28424
28425 int page_flipping;
28426 int current_page;
28427 u32 crtc_offset;
28428 u32 crtc_offset_cntl;
28429
28430 - atomic_t vbl_received;
28431 + atomic_unchecked_t vbl_received;
28432
28433 u32 color_fmt;
28434 unsigned int front_offset;
28435 diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c
28436 --- linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
28437 +++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
28438 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
28439 if (crtc != 0)
28440 return 0;
28441
28442 - return atomic_read(&dev_priv->vbl_received);
28443 + return atomic_read_unchecked(&dev_priv->vbl_received);
28444 }
28445
28446 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28447 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
28448 /* VBLANK interrupt */
28449 if (status & R128_CRTC_VBLANK_INT) {
28450 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28451 - atomic_inc(&dev_priv->vbl_received);
28452 + atomic_inc_unchecked(&dev_priv->vbl_received);
28453 drm_handle_vblank(dev, 0);
28454 return IRQ_HANDLED;
28455 }
28456 diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c
28457 --- linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
28458 +++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
28459 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
28460
28461 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
28462 {
28463 - if (atomic_read(&dev_priv->idle_count) == 0) {
28464 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
28465 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28466 } else {
28467 - atomic_set(&dev_priv->idle_count, 0);
28468 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28469 }
28470 }
28471
28472 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c
28473 --- linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
28474 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
28475 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
28476 char name[512];
28477 int i;
28478
28479 + pax_track_stack();
28480 +
28481 ctx->card = card;
28482 ctx->bios = bios;
28483
28484 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c
28485 --- linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
28486 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
28487 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
28488 regex_t mask_rex;
28489 regmatch_t match[4];
28490 char buf[1024];
28491 - size_t end;
28492 + long end;
28493 int len;
28494 int done = 0;
28495 int r;
28496 unsigned o;
28497 struct offset *offset;
28498 char last_reg_s[10];
28499 - int last_reg;
28500 + unsigned long last_reg;
28501
28502 if (regcomp
28503 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28504 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c
28505 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
28506 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
28507 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
28508 bool linkb;
28509 struct radeon_i2c_bus_rec ddc_bus;
28510
28511 + pax_track_stack();
28512 +
28513 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
28514
28515 if (data_offset == 0)
28516 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
28517 }
28518 }
28519
28520 -struct bios_connector {
28521 +static struct bios_connector {
28522 bool valid;
28523 uint16_t line_mux;
28524 uint16_t devices;
28525 int connector_type;
28526 struct radeon_i2c_bus_rec ddc_bus;
28527 -};
28528 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
28529
28530 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
28531 drm_device
28532 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
28533 uint8_t dac;
28534 union atom_supported_devices *supported_devices;
28535 int i, j;
28536 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
28537
28538 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
28539
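The radeon_atombios.c hunk above moves the ATOM_MAX_SUPPORTED_DEVICE-sized bios_connectors array out of the function's stack frame into a static file-scope table, and the pax_track_stack() calls added elsewhere in this patch mark the remaining large-frame functions for PaX's kernel-stack usage tracking. A toy sketch of the stack-to-BSS move, with illustrative names and sizes, assuming callers of the scan are serialized as they are on the probing path here:

	#include <string.h>

	#define MAX_DEVICES 32			/* illustrative bound */

	struct connector_info { int valid; int type; };

	/* File-scope scratch table: one BSS allocation instead of a large
	 * per-call stack array; callers are assumed not to run concurrently. */
	static struct connector_info scratch[MAX_DEVICES];

	static int scan_connectors(void)
	{
		memset(scratch, 0, sizeof(scratch));
		/* ... fill scratch[] from the BIOS tables, count valid entries ... */
		return 0;
	}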
28540 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c
28541 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
28542 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
28543 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
28544
28545 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
28546 error = freq - current_freq;
28547 - error = error < 0 ? 0xffffffff : error;
28548 + error = (int32_t)error < 0 ? 0xffffffff : error;
28549 } else
28550 error = abs(current_freq - freq);
28551 vco_diff = abs(vco - best_vco);
28552 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h
28553 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
28554 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
28555 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
28556
28557 /* SW interrupt */
28558 wait_queue_head_t swi_queue;
28559 - atomic_t swi_emitted;
28560 + atomic_unchecked_t swi_emitted;
28561 int vblank_crtc;
28562 uint32_t irq_enable_reg;
28563 uint32_t r500_disp_irq_reg;
28564 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c
28565 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
28566 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
28567 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
28568 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28569 return 0;
28570 }
28571 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28572 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28573 if (!rdev->cp.ready) {
28574 /* FIXME: cp is not running assume everythings is done right
28575 * away
28576 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
28577 return r;
28578 }
28579 WREG32(rdev->fence_drv.scratch_reg, 0);
28580 - atomic_set(&rdev->fence_drv.seq, 0);
28581 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28582 INIT_LIST_HEAD(&rdev->fence_drv.created);
28583 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28584 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28585 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h
28586 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
28587 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h 2011-05-04 17:56:28.000000000 -0400
28588 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
28589 */
28590 struct radeon_fence_driver {
28591 uint32_t scratch_reg;
28592 - atomic_t seq;
28593 + atomic_unchecked_t seq;
28594 uint32_t last_seq;
28595 unsigned long count_timeout;
28596 wait_queue_head_t queue;
28597 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c
28598 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
28599 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
28600 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
28601 request = compat_alloc_user_space(sizeof(*request));
28602 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28603 || __put_user(req32.param, &request->param)
28604 - || __put_user((void __user *)(unsigned long)req32.value,
28605 + || __put_user((unsigned long)req32.value,
28606 &request->value))
28607 return -EFAULT;
28608
28609 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c
28610 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
28611 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
28612 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
28613 unsigned int ret;
28614 RING_LOCALS;
28615
28616 - atomic_inc(&dev_priv->swi_emitted);
28617 - ret = atomic_read(&dev_priv->swi_emitted);
28618 + atomic_inc_unchecked(&dev_priv->swi_emitted);
28619 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28620
28621 BEGIN_RING(4);
28622 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28623 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
28624 drm_radeon_private_t *dev_priv =
28625 (drm_radeon_private_t *) dev->dev_private;
28626
28627 - atomic_set(&dev_priv->swi_emitted, 0);
28628 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28629 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28630
28631 dev->max_vblank_count = 0x001fffff;
28632 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c
28633 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
28634 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
28635 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
28636 {
28637 drm_radeon_private_t *dev_priv = dev->dev_private;
28638 drm_radeon_getparam_t *param = data;
28639 - int value;
28640 + int value = 0;
28641
28642 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28643
28644 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c
28645 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
28646 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
28647 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
28648 DRM_INFO("radeon: ttm finalized\n");
28649 }
28650
28651 -static struct vm_operations_struct radeon_ttm_vm_ops;
28652 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
28653 -
28654 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
28655 -{
28656 - struct ttm_buffer_object *bo;
28657 - int r;
28658 -
28659 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
28660 - if (bo == NULL) {
28661 - return VM_FAULT_NOPAGE;
28662 - }
28663 - r = ttm_vm_ops->fault(vma, vmf);
28664 - return r;
28665 -}
28666 -
28667 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28668 {
28669 struct drm_file *file_priv;
28670 struct radeon_device *rdev;
28671 - int r;
28672
28673 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
28674 return drm_mmap(filp, vma);
28675 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
28676
28677 file_priv = (struct drm_file *)filp->private_data;
28678 rdev = file_priv->minor->dev->dev_private;
28679 - if (rdev == NULL) {
28680 + if (!rdev)
28681 return -EINVAL;
28682 - }
28683 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
28684 - if (unlikely(r != 0)) {
28685 - return r;
28686 - }
28687 - if (unlikely(ttm_vm_ops == NULL)) {
28688 - ttm_vm_ops = vma->vm_ops;
28689 - radeon_ttm_vm_ops = *ttm_vm_ops;
28690 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28691 - }
28692 - vma->vm_ops = &radeon_ttm_vm_ops;
28693 - return 0;
28694 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
28695 }
28696
28697
28698 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c
28699 --- linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
28700 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
28701 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
28702 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28703 rdev->pm.sideport_bandwidth.full)
28704 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28705 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
28706 + read_delay_latency.full = rfixed_const(800 * 1000);
28707 read_delay_latency.full = rfixed_div(read_delay_latency,
28708 rdev->pm.igp_sideport_mclk);
28709 + a.full = rfixed_const(370);
28710 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
28711 } else {
28712 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28713 rdev->pm.k8_bandwidth.full)
28714 diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c
28715 --- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
28716 +++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
28717 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
28718 NULL
28719 };
28720
28721 -static struct sysfs_ops ttm_bo_global_ops = {
28722 +static const struct sysfs_ops ttm_bo_global_ops = {
28723 .show = &ttm_bo_global_show
28724 };
28725
28726 diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c
28727 --- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
28728 +++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
28729 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
28730 {
28731 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
28732 vma->vm_private_data;
28733 - struct ttm_bo_device *bdev = bo->bdev;
28734 + struct ttm_bo_device *bdev;
28735 unsigned long bus_base;
28736 unsigned long bus_offset;
28737 unsigned long bus_size;
28738 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
28739 unsigned long address = (unsigned long)vmf->virtual_address;
28740 int retval = VM_FAULT_NOPAGE;
28741
28742 + if (!bo)
28743 + return VM_FAULT_NOPAGE;
28744 + bdev = bo->bdev;
28745 +
28746 /*
28747 * Work around locking order reversal in fault / nopfn
28748 * between mmap_sem and bo_reserve: Perform a trylock operation
28749 diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c
28750 --- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
28751 +++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
28752 @@ -36,7 +36,7 @@
28753 struct ttm_global_item {
28754 struct mutex mutex;
28755 void *object;
28756 - int refcount;
28757 + atomic_t refcount;
28758 };
28759
28760 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
28761 @@ -49,7 +49,7 @@ void ttm_global_init(void)
28762 struct ttm_global_item *item = &glob[i];
28763 mutex_init(&item->mutex);
28764 item->object = NULL;
28765 - item->refcount = 0;
28766 + atomic_set(&item->refcount, 0);
28767 }
28768 }
28769
28770 @@ -59,7 +59,7 @@ void ttm_global_release(void)
28771 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
28772 struct ttm_global_item *item = &glob[i];
28773 BUG_ON(item->object != NULL);
28774 - BUG_ON(item->refcount != 0);
28775 + BUG_ON(atomic_read(&item->refcount) != 0);
28776 }
28777 }
28778
28779 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
28780 void *object;
28781
28782 mutex_lock(&item->mutex);
28783 - if (item->refcount == 0) {
28784 + if (atomic_read(&item->refcount) == 0) {
28785 item->object = kzalloc(ref->size, GFP_KERNEL);
28786 if (unlikely(item->object == NULL)) {
28787 ret = -ENOMEM;
28788 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
28789 goto out_err;
28790
28791 }
28792 - ++item->refcount;
28793 + atomic_inc(&item->refcount);
28794 ref->object = item->object;
28795 object = item->object;
28796 mutex_unlock(&item->mutex);
28797 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
28798 struct ttm_global_item *item = &glob[ref->global_type];
28799
28800 mutex_lock(&item->mutex);
28801 - BUG_ON(item->refcount == 0);
28802 + BUG_ON(atomic_read(&item->refcount) == 0);
28803 BUG_ON(ref->object != item->object);
28804 - if (--item->refcount == 0) {
28805 + if (atomic_dec_and_test(&item->refcount)) {
28806 ref->release(ref);
28807 item->object = NULL;
28808 }
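The ttm_global.c hunks above turn the plain int refcount (previously only touched under item->mutex) into an atomic_t driven by atomic_inc()/atomic_dec_and_test(), which also brings it under the same atomic_t overflow checking the rest of this patch relies on; the mutex still serializes allocation and release of the shared object. A short sketch of the release side, with hypothetical names:

	#include <asm/atomic.h>

	struct example_item {
		atomic_t refcount;
		void *object;
	};

	static void example_item_put(struct example_item *item,
				     void (*release)(void *object))
	{
		/* atomic_dec_and_test() returns true only for the final put. */
		if (atomic_dec_and_test(&item->refcount)) {
			release(item->object);
			item->object = NULL;
		}
	}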
28809 diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c
28810 --- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
28811 +++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
28812 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
28813 NULL
28814 };
28815
28816 -static struct sysfs_ops ttm_mem_zone_ops = {
28817 +static const struct sysfs_ops ttm_mem_zone_ops = {
28818 .show = &ttm_mem_zone_show,
28819 .store = &ttm_mem_zone_store
28820 };
28821 diff -urNp linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h
28822 --- linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
28823 +++ linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
28824 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28825 typedef uint32_t maskarray_t[5];
28826
28827 typedef struct drm_via_irq {
28828 - atomic_t irq_received;
28829 + atomic_unchecked_t irq_received;
28830 uint32_t pending_mask;
28831 uint32_t enable_mask;
28832 wait_queue_head_t irq_queue;
28833 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
28834 struct timeval last_vblank;
28835 int last_vblank_valid;
28836 unsigned usec_per_vblank;
28837 - atomic_t vbl_received;
28838 + atomic_unchecked_t vbl_received;
28839 drm_via_state_t hc_state;
28840 char pci_buf[VIA_PCI_BUF_SIZE];
28841 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28842 diff -urNp linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c
28843 --- linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
28844 +++ linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
28845 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
28846 if (crtc != 0)
28847 return 0;
28848
28849 - return atomic_read(&dev_priv->vbl_received);
28850 + return atomic_read_unchecked(&dev_priv->vbl_received);
28851 }
28852
28853 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28854 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
28855
28856 status = VIA_READ(VIA_REG_INTERRUPT);
28857 if (status & VIA_IRQ_VBLANK_PENDING) {
28858 - atomic_inc(&dev_priv->vbl_received);
28859 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28860 + atomic_inc_unchecked(&dev_priv->vbl_received);
28861 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28862 do_gettimeofday(&cur_vblank);
28863 if (dev_priv->last_vblank_valid) {
28864 dev_priv->usec_per_vblank =
28865 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
28866 dev_priv->last_vblank = cur_vblank;
28867 dev_priv->last_vblank_valid = 1;
28868 }
28869 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28870 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28871 DRM_DEBUG("US per vblank is: %u\n",
28872 dev_priv->usec_per_vblank);
28873 }
28874 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
28875
28876 for (i = 0; i < dev_priv->num_irqs; ++i) {
28877 if (status & cur_irq->pending_mask) {
28878 - atomic_inc(&cur_irq->irq_received);
28879 + atomic_inc_unchecked(&cur_irq->irq_received);
28880 DRM_WAKEUP(&cur_irq->irq_queue);
28881 handled = 1;
28882 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
28883 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
28884 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28885 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28886 masks[irq][4]));
28887 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28888 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28889 } else {
28890 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28891 (((cur_irq_sequence =
28892 - atomic_read(&cur_irq->irq_received)) -
28893 + atomic_read_unchecked(&cur_irq->irq_received)) -
28894 *sequence) <= (1 << 23)));
28895 }
28896 *sequence = cur_irq_sequence;
28897 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
28898 }
28899
28900 for (i = 0; i < dev_priv->num_irqs; ++i) {
28901 - atomic_set(&cur_irq->irq_received, 0);
28902 + atomic_set_unchecked(&cur_irq->irq_received, 0);
28903 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28904 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28905 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28906 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
28907 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28908 case VIA_IRQ_RELATIVE:
28909 irqwait->request.sequence +=
28910 - atomic_read(&cur_irq->irq_received);
28911 + atomic_read_unchecked(&cur_irq->irq_received);
28912 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28913 case VIA_IRQ_ABSOLUTE:
28914 break;
28915 diff -urNp linux-2.6.32.42/drivers/hid/hid-core.c linux-2.6.32.42/drivers/hid/hid-core.c
28916 --- linux-2.6.32.42/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
28917 +++ linux-2.6.32.42/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
28918 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
28919
28920 int hid_add_device(struct hid_device *hdev)
28921 {
28922 - static atomic_t id = ATOMIC_INIT(0);
28923 + static atomic_unchecked_t id = ATOMIC_INIT(0);
28924 int ret;
28925
28926 if (WARN_ON(hdev->status & HID_STAT_ADDED))
28927 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
28928 /* XXX hack, any other cleaner solution after the driver core
28929 * is converted to allow more than 20 bytes as the device name? */
28930 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
28931 - hdev->vendor, hdev->product, atomic_inc_return(&id));
28932 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
28933
28934 ret = device_add(&hdev->dev);
28935 if (!ret)
28936 diff -urNp linux-2.6.32.42/drivers/hid/usbhid/hiddev.c linux-2.6.32.42/drivers/hid/usbhid/hiddev.c
28937 --- linux-2.6.32.42/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
28938 +++ linux-2.6.32.42/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
28939 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
28940 return put_user(HID_VERSION, (int __user *)arg);
28941
28942 case HIDIOCAPPLICATION:
28943 - if (arg < 0 || arg >= hid->maxapplication)
28944 + if (arg >= hid->maxapplication)
28945 return -EINVAL;
28946
28947 for (i = 0; i < hid->maxcollection; i++)
28948 diff -urNp linux-2.6.32.42/drivers/hwmon/lis3lv02d.c linux-2.6.32.42/drivers/hwmon/lis3lv02d.c
28949 --- linux-2.6.32.42/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
28950 +++ linux-2.6.32.42/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
28951 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
28952 * the lid is closed. This leads to interrupts as soon as a little move
28953 * is done.
28954 */
28955 - atomic_inc(&lis3_dev.count);
28956 + atomic_inc_unchecked(&lis3_dev.count);
28957
28958 wake_up_interruptible(&lis3_dev.misc_wait);
28959 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
28960 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
28961 if (test_and_set_bit(0, &lis3_dev.misc_opened))
28962 return -EBUSY; /* already open */
28963
28964 - atomic_set(&lis3_dev.count, 0);
28965 + atomic_set_unchecked(&lis3_dev.count, 0);
28966
28967 /*
28968 * The sensor can generate interrupts for free-fall and direction
28969 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
28970 add_wait_queue(&lis3_dev.misc_wait, &wait);
28971 while (true) {
28972 set_current_state(TASK_INTERRUPTIBLE);
28973 - data = atomic_xchg(&lis3_dev.count, 0);
28974 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
28975 if (data)
28976 break;
28977
28978 @@ -244,7 +244,7 @@ out:
28979 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
28980 {
28981 poll_wait(file, &lis3_dev.misc_wait, wait);
28982 - if (atomic_read(&lis3_dev.count))
28983 + if (atomic_read_unchecked(&lis3_dev.count))
28984 return POLLIN | POLLRDNORM;
28985 return 0;
28986 }
28987 diff -urNp linux-2.6.32.42/drivers/hwmon/lis3lv02d.h linux-2.6.32.42/drivers/hwmon/lis3lv02d.h
28988 --- linux-2.6.32.42/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
28989 +++ linux-2.6.32.42/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
28990 @@ -201,7 +201,7 @@ struct lis3lv02d {
28991
28992 struct input_polled_dev *idev; /* input device */
28993 struct platform_device *pdev; /* platform device */
28994 - atomic_t count; /* interrupt count after last read */
28995 + atomic_unchecked_t count; /* interrupt count after last read */
28996 int xcalib; /* calibrated null value for x */
28997 int ycalib; /* calibrated null value for y */
28998 int zcalib; /* calibrated null value for z */
28999 diff -urNp linux-2.6.32.42/drivers/hwmon/sht15.c linux-2.6.32.42/drivers/hwmon/sht15.c
29000 --- linux-2.6.32.42/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
29001 +++ linux-2.6.32.42/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
29002 @@ -112,7 +112,7 @@ struct sht15_data {
29003 int supply_uV;
29004 int supply_uV_valid;
29005 struct work_struct update_supply_work;
29006 - atomic_t interrupt_handled;
29007 + atomic_unchecked_t interrupt_handled;
29008 };
29009
29010 /**
29011 @@ -245,13 +245,13 @@ static inline int sht15_update_single_va
29012 return ret;
29013
29014 gpio_direction_input(data->pdata->gpio_data);
29015 - atomic_set(&data->interrupt_handled, 0);
29016 + atomic_set_unchecked(&data->interrupt_handled, 0);
29017
29018 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29019 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29020 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29021 /* Only relevant if the interrupt hasn't occured. */
29022 - if (!atomic_read(&data->interrupt_handled))
29023 + if (!atomic_read_unchecked(&data->interrupt_handled))
29024 schedule_work(&data->read_work);
29025 }
29026 ret = wait_event_timeout(data->wait_queue,
29027 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
29028 struct sht15_data *data = d;
29029 /* First disable the interrupt */
29030 disable_irq_nosync(irq);
29031 - atomic_inc(&data->interrupt_handled);
29032 + atomic_inc_unchecked(&data->interrupt_handled);
29033 /* Then schedule a reading work struct */
29034 if (data->flag != SHT15_READING_NOTHING)
29035 schedule_work(&data->read_work);
29036 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
29037 here as could have gone low in meantime so verify
29038 it hasn't!
29039 */
29040 - atomic_set(&data->interrupt_handled, 0);
29041 + atomic_set_unchecked(&data->interrupt_handled, 0);
29042 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29043 /* If still not occured or another handler has been scheduled */
29044 if (gpio_get_value(data->pdata->gpio_data)
29045 - || atomic_read(&data->interrupt_handled))
29046 + || atomic_read_unchecked(&data->interrupt_handled))
29047 return;
29048 }
29049 /* Read the data back from the device */
29050 diff -urNp linux-2.6.32.42/drivers/hwmon/w83791d.c linux-2.6.32.42/drivers/hwmon/w83791d.c
29051 --- linux-2.6.32.42/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
29052 +++ linux-2.6.32.42/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
29053 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
29054 struct i2c_board_info *info);
29055 static int w83791d_remove(struct i2c_client *client);
29056
29057 -static int w83791d_read(struct i2c_client *client, u8 register);
29058 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
29059 +static int w83791d_read(struct i2c_client *client, u8 reg);
29060 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
29061 static struct w83791d_data *w83791d_update_device(struct device *dev);
29062
29063 #ifdef DEBUG
29064 diff -urNp linux-2.6.32.42/drivers/ide/ide-cd.c linux-2.6.32.42/drivers/ide/ide-cd.c
29065 --- linux-2.6.32.42/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
29066 +++ linux-2.6.32.42/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
29067 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
29068 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29069 if ((unsigned long)buf & alignment
29070 || blk_rq_bytes(rq) & q->dma_pad_mask
29071 - || object_is_on_stack(buf))
29072 + || object_starts_on_stack(buf))
29073 drive->dma = 0;
29074 }
29075 }
29076 diff -urNp linux-2.6.32.42/drivers/ide/ide-floppy.c linux-2.6.32.42/drivers/ide/ide-floppy.c
29077 --- linux-2.6.32.42/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
29078 +++ linux-2.6.32.42/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
29079 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
29080 u8 pc_buf[256], header_len, desc_cnt;
29081 int i, rc = 1, blocks, length;
29082
29083 + pax_track_stack();
29084 +
29085 ide_debug_log(IDE_DBG_FUNC, "enter");
29086
29087 drive->bios_cyl = 0;
29088 diff -urNp linux-2.6.32.42/drivers/ide/setup-pci.c linux-2.6.32.42/drivers/ide/setup-pci.c
29089 --- linux-2.6.32.42/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
29090 +++ linux-2.6.32.42/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
29091 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
29092 int ret, i, n_ports = dev2 ? 4 : 2;
29093 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
29094
29095 + pax_track_stack();
29096 +
29097 for (i = 0; i < n_ports / 2; i++) {
29098 ret = ide_setup_pci_controller(pdev[i], d, !i);
29099 if (ret < 0)
29100 diff -urNp linux-2.6.32.42/drivers/ieee1394/dv1394.c linux-2.6.32.42/drivers/ieee1394/dv1394.c
29101 --- linux-2.6.32.42/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
29102 +++ linux-2.6.32.42/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
29103 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
29104 based upon DIF section and sequence
29105 */
29106
29107 -static void inline
29108 +static inline void
29109 frame_put_packet (struct frame *f, struct packet *p)
29110 {
29111 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
29112 diff -urNp linux-2.6.32.42/drivers/ieee1394/hosts.c linux-2.6.32.42/drivers/ieee1394/hosts.c
29113 --- linux-2.6.32.42/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
29114 +++ linux-2.6.32.42/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
29115 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
29116 }
29117
29118 static struct hpsb_host_driver dummy_driver = {
29119 + .name = "dummy",
29120 .transmit_packet = dummy_transmit_packet,
29121 .devctl = dummy_devctl,
29122 .isoctl = dummy_isoctl
29123 diff -urNp linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c
29124 --- linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
29125 +++ linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
29126 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
29127 for (func = 0; func < 8; func++) {
29128 u32 class = read_pci_config(num,slot,func,
29129 PCI_CLASS_REVISION);
29130 - if ((class == 0xffffffff))
29131 + if (class == 0xffffffff)
29132 continue; /* No device at this func */
29133
29134 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
29135 diff -urNp linux-2.6.32.42/drivers/ieee1394/ohci1394.c linux-2.6.32.42/drivers/ieee1394/ohci1394.c
29136 --- linux-2.6.32.42/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
29137 +++ linux-2.6.32.42/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
29138 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
29139 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
29140
29141 /* Module Parameters */
29142 -static int phys_dma = 1;
29143 +static int phys_dma;
29144 module_param(phys_dma, int, 0444);
29145 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
29146 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
29147
29148 static void dma_trm_tasklet(unsigned long data);
29149 static void dma_trm_reset(struct dma_trm_ctx *d);
29150 diff -urNp linux-2.6.32.42/drivers/ieee1394/sbp2.c linux-2.6.32.42/drivers/ieee1394/sbp2.c
29151 --- linux-2.6.32.42/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
29152 +++ linux-2.6.32.42/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
29153 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
29154 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
29155 MODULE_LICENSE("GPL");
29156
29157 -static int sbp2_module_init(void)
29158 +static int __init sbp2_module_init(void)
29159 {
29160 int ret;
29161
29162 diff -urNp linux-2.6.32.42/drivers/infiniband/core/cm.c linux-2.6.32.42/drivers/infiniband/core/cm.c
29163 --- linux-2.6.32.42/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
29164 +++ linux-2.6.32.42/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
29165 @@ -112,7 +112,7 @@ static char const counter_group_names[CM
29166
29167 struct cm_counter_group {
29168 struct kobject obj;
29169 - atomic_long_t counter[CM_ATTR_COUNT];
29170 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29171 };
29172
29173 struct cm_counter_attribute {
29174 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
29175 struct ib_mad_send_buf *msg = NULL;
29176 int ret;
29177
29178 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29179 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29180 counter[CM_REQ_COUNTER]);
29181
29182 /* Quick state check to discard duplicate REQs. */
29183 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
29184 if (!cm_id_priv)
29185 return;
29186
29187 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29188 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29189 counter[CM_REP_COUNTER]);
29190 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29191 if (ret)
29192 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
29193 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29194 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29195 spin_unlock_irq(&cm_id_priv->lock);
29196 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29197 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29198 counter[CM_RTU_COUNTER]);
29199 goto out;
29200 }
29201 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
29202 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29203 dreq_msg->local_comm_id);
29204 if (!cm_id_priv) {
29205 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29206 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29207 counter[CM_DREQ_COUNTER]);
29208 cm_issue_drep(work->port, work->mad_recv_wc);
29209 return -EINVAL;
29210 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
29211 case IB_CM_MRA_REP_RCVD:
29212 break;
29213 case IB_CM_TIMEWAIT:
29214 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29215 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29216 counter[CM_DREQ_COUNTER]);
29217 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29218 goto unlock;
29219 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
29220 cm_free_msg(msg);
29221 goto deref;
29222 case IB_CM_DREQ_RCVD:
29223 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29224 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29225 counter[CM_DREQ_COUNTER]);
29226 goto unlock;
29227 default:
29228 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
29229 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29230 cm_id_priv->msg, timeout)) {
29231 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29232 - atomic_long_inc(&work->port->
29233 + atomic_long_inc_unchecked(&work->port->
29234 counter_group[CM_RECV_DUPLICATES].
29235 counter[CM_MRA_COUNTER]);
29236 goto out;
29237 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
29238 break;
29239 case IB_CM_MRA_REQ_RCVD:
29240 case IB_CM_MRA_REP_RCVD:
29241 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29242 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29243 counter[CM_MRA_COUNTER]);
29244 /* fall through */
29245 default:
29246 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
29247 case IB_CM_LAP_IDLE:
29248 break;
29249 case IB_CM_MRA_LAP_SENT:
29250 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29251 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29252 counter[CM_LAP_COUNTER]);
29253 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29254 goto unlock;
29255 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
29256 cm_free_msg(msg);
29257 goto deref;
29258 case IB_CM_LAP_RCVD:
29259 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29260 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29261 counter[CM_LAP_COUNTER]);
29262 goto unlock;
29263 default:
29264 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
29265 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29266 if (cur_cm_id_priv) {
29267 spin_unlock_irq(&cm.lock);
29268 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29269 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29270 counter[CM_SIDR_REQ_COUNTER]);
29271 goto out; /* Duplicate message. */
29272 }
29273 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
29274 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29275 msg->retries = 1;
29276
29277 - atomic_long_add(1 + msg->retries,
29278 + atomic_long_add_unchecked(1 + msg->retries,
29279 &port->counter_group[CM_XMIT].counter[attr_index]);
29280 if (msg->retries)
29281 - atomic_long_add(msg->retries,
29282 + atomic_long_add_unchecked(msg->retries,
29283 &port->counter_group[CM_XMIT_RETRIES].
29284 counter[attr_index]);
29285
29286 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
29287 }
29288
29289 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29290 - atomic_long_inc(&port->counter_group[CM_RECV].
29291 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29292 counter[attr_id - CM_ATTR_ID_OFFSET]);
29293
29294 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29295 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
29296 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29297
29298 return sprintf(buf, "%ld\n",
29299 - atomic_long_read(&group->counter[cm_attr->index]));
29300 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29301 }
29302
29303 -static struct sysfs_ops cm_counter_ops = {
29304 +static const struct sysfs_ops cm_counter_ops = {
29305 .show = cm_show_counter
29306 };
29307
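
The `atomic_long_unchecked_t` conversions above follow the PAX_REFCOUNT split: the regular atomic operations gain overflow detection aimed at reference counts, so counters that are pure statistics (duplicate-message tallies here) are moved to `*_unchecked` variants that keep plain wrapping arithmetic. A userspace analogue of that distinction, using compiler builtins rather than the PaX implementation and ignoring concurrency:

#include <limits.h>
#include <stdio.h>

/* analogue of the checked increment: refuse to wrap past LONG_MAX */
static long checked_inc(long *v)
{
        long next;

        if (__builtin_add_overflow(*v, 1L, &next)) {
                fprintf(stderr, "overflow detected, value saturated\n");
                return *v;
        }
        return *v = next;
}

/* analogue of the *_unchecked increment: wrapping is acceptable */
static unsigned long unchecked_inc(unsigned long *v)
{
        return ++*v;
}

int main(void)
{
        long refcount = LONG_MAX;               /* a counter that must not wrap */
        unsigned long recv_dups = ULONG_MAX;    /* a statistic where wrap is harmless */

        checked_inc(&refcount);                 /* reports and saturates */
        unchecked_inc(&recv_dups);              /* silently wraps to 0 */
        printf("refcount=%ld recv_dups=%lu\n", refcount, recv_dups);
        return 0;
}
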
29308 diff -urNp linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c
29309 --- linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
29310 +++ linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
29311 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
29312
29313 struct task_struct *thread;
29314
29315 - atomic_t req_ser;
29316 - atomic_t flush_ser;
29317 + atomic_unchecked_t req_ser;
29318 + atomic_unchecked_t flush_ser;
29319
29320 wait_queue_head_t force_wait;
29321 };
29322 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
29323 struct ib_fmr_pool *pool = pool_ptr;
29324
29325 do {
29326 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29327 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29328 ib_fmr_batch_release(pool);
29329
29330 - atomic_inc(&pool->flush_ser);
29331 + atomic_inc_unchecked(&pool->flush_ser);
29332 wake_up_interruptible(&pool->force_wait);
29333
29334 if (pool->flush_function)
29335 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
29336 }
29337
29338 set_current_state(TASK_INTERRUPTIBLE);
29339 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29340 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29341 !kthread_should_stop())
29342 schedule();
29343 __set_current_state(TASK_RUNNING);
29344 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
29345 pool->dirty_watermark = params->dirty_watermark;
29346 pool->dirty_len = 0;
29347 spin_lock_init(&pool->pool_lock);
29348 - atomic_set(&pool->req_ser, 0);
29349 - atomic_set(&pool->flush_ser, 0);
29350 + atomic_set_unchecked(&pool->req_ser, 0);
29351 + atomic_set_unchecked(&pool->flush_ser, 0);
29352 init_waitqueue_head(&pool->force_wait);
29353
29354 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29355 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
29356 }
29357 spin_unlock_irq(&pool->pool_lock);
29358
29359 - serial = atomic_inc_return(&pool->req_ser);
29360 + serial = atomic_inc_return_unchecked(&pool->req_ser);
29361 wake_up_process(pool->thread);
29362
29363 if (wait_event_interruptible(pool->force_wait,
29364 - atomic_read(&pool->flush_ser) - serial >= 0))
29365 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
29366 return -EINTR;
29367
29368 return 0;
29369 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
29370 } else {
29371 list_add_tail(&fmr->list, &pool->dirty_list);
29372 if (++pool->dirty_len >= pool->dirty_watermark) {
29373 - atomic_inc(&pool->req_ser);
29374 + atomic_inc_unchecked(&pool->req_ser);
29375 wake_up_process(pool->thread);
29376 }
29377 }
29378 diff -urNp linux-2.6.32.42/drivers/infiniband/core/sysfs.c linux-2.6.32.42/drivers/infiniband/core/sysfs.c
29379 --- linux-2.6.32.42/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
29380 +++ linux-2.6.32.42/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
29381 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
29382 return port_attr->show(p, port_attr, buf);
29383 }
29384
29385 -static struct sysfs_ops port_sysfs_ops = {
29386 +static const struct sysfs_ops port_sysfs_ops = {
29387 .show = port_attr_show
29388 };
29389
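
The `const struct sysfs_ops` change above, like the backlight and suspend ops changes later in this patch, constifies a table of function pointers that is never modified after initialisation so it can live in read-only memory. A small sketch of the same idea with illustrative names:

#include <stdio.h>

struct demo_ops {
        void (*show)(const char *what);
};

static void demo_show(const char *what)
{
        printf("show: %s\n", what);
}

static const struct demo_ops demo_counter_ops = {
        .show = demo_show,
};

int main(void)
{
        demo_counter_ops.show("counters");
        /* demo_counter_ops.show = NULL; would now be rejected by the compiler */
        return 0;
}
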
29390 diff -urNp linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c
29391 --- linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
29392 +++ linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
29393 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
29394 dst->grh.sgid_index = src->grh.sgid_index;
29395 dst->grh.hop_limit = src->grh.hop_limit;
29396 dst->grh.traffic_class = src->grh.traffic_class;
29397 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
29398 dst->dlid = src->dlid;
29399 dst->sl = src->sl;
29400 dst->src_path_bits = src->src_path_bits;
29401 dst->static_rate = src->static_rate;
29402 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
29403 dst->port_num = src->port_num;
29404 + dst->reserved = 0;
29405 }
29406 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
29407
29408 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
29409 struct ib_qp_attr *src)
29410 {
29411 + dst->qp_state = src->qp_state;
29412 dst->cur_qp_state = src->cur_qp_state;
29413 dst->path_mtu = src->path_mtu;
29414 dst->path_mig_state = src->path_mig_state;
29415 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
29416 dst->rnr_retry = src->rnr_retry;
29417 dst->alt_port_num = src->alt_port_num;
29418 dst->alt_timeout = src->alt_timeout;
29419 + memset(dst->reserved, 0, sizeof(dst->reserved));
29420 }
29421 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
29422
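
The added `memset()` and `dst->reserved = 0` stores above close a kernel-to-user information leak: every byte of a structure copied out to user space has to be written explicitly, or reserved fields and padding carry stale kernel stack contents. A sketch of the pattern with stand-in types; zeroing the whole structure first is one simple way to cover both reserved members and padding:

#include <stdint.h>
#include <string.h>

struct demo_user_ah_attr {
        uint32_t dlid;
        uint8_t  sl;
        uint8_t  port_num;
        uint8_t  reserved[2];           /* never meaningful, but still copied out */
};

static void demo_fill(struct demo_user_ah_attr *dst, uint32_t dlid,
                      uint8_t sl, uint8_t port)
{
        memset(dst, 0, sizeof(*dst));   /* clears reserved fields and any padding */
        dst->dlid = dlid;
        dst->sl = sl;
        dst->port_num = port;
}

int main(void)
{
        struct demo_user_ah_attr attr;

        demo_fill(&attr, 0x17, 3, 1);
        return attr.reserved[0];        /* guaranteed 0 after the memset */
}
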
29423 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c
29424 --- linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
29425 +++ linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
29426 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
29427 struct infinipath_counters counters;
29428 struct ipath_devdata *dd;
29429
29430 + pax_track_stack();
29431 +
29432 dd = file->f_path.dentry->d_inode->i_private;
29433 dd->ipath_f_read_counters(dd, &counters);
29434
29435 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c
29436 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
29437 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
29438 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
29439 LIST_HEAD(nes_adapter_list);
29440 static LIST_HEAD(nes_dev_list);
29441
29442 -atomic_t qps_destroyed;
29443 +atomic_unchecked_t qps_destroyed;
29444
29445 static unsigned int ee_flsh_adapter;
29446 static unsigned int sysfs_nonidx_addr;
29447 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
29448 struct nes_adapter *nesadapter = nesdev->nesadapter;
29449 u32 qp_id;
29450
29451 - atomic_inc(&qps_destroyed);
29452 + atomic_inc_unchecked(&qps_destroyed);
29453
29454 /* Free the control structures */
29455
29456 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c
29457 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
29458 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
29459 @@ -69,11 +69,11 @@ u32 cm_packets_received;
29460 u32 cm_listens_created;
29461 u32 cm_listens_destroyed;
29462 u32 cm_backlog_drops;
29463 -atomic_t cm_loopbacks;
29464 -atomic_t cm_nodes_created;
29465 -atomic_t cm_nodes_destroyed;
29466 -atomic_t cm_accel_dropped_pkts;
29467 -atomic_t cm_resets_recvd;
29468 +atomic_unchecked_t cm_loopbacks;
29469 +atomic_unchecked_t cm_nodes_created;
29470 +atomic_unchecked_t cm_nodes_destroyed;
29471 +atomic_unchecked_t cm_accel_dropped_pkts;
29472 +atomic_unchecked_t cm_resets_recvd;
29473
29474 static inline int mini_cm_accelerated(struct nes_cm_core *,
29475 struct nes_cm_node *);
29476 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
29477
29478 static struct nes_cm_core *g_cm_core;
29479
29480 -atomic_t cm_connects;
29481 -atomic_t cm_accepts;
29482 -atomic_t cm_disconnects;
29483 -atomic_t cm_closes;
29484 -atomic_t cm_connecteds;
29485 -atomic_t cm_connect_reqs;
29486 -atomic_t cm_rejects;
29487 +atomic_unchecked_t cm_connects;
29488 +atomic_unchecked_t cm_accepts;
29489 +atomic_unchecked_t cm_disconnects;
29490 +atomic_unchecked_t cm_closes;
29491 +atomic_unchecked_t cm_connecteds;
29492 +atomic_unchecked_t cm_connect_reqs;
29493 +atomic_unchecked_t cm_rejects;
29494
29495
29496 /**
29497 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
29498 cm_node->rem_mac);
29499
29500 add_hte_node(cm_core, cm_node);
29501 - atomic_inc(&cm_nodes_created);
29502 + atomic_inc_unchecked(&cm_nodes_created);
29503
29504 return cm_node;
29505 }
29506 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
29507 }
29508
29509 atomic_dec(&cm_core->node_cnt);
29510 - atomic_inc(&cm_nodes_destroyed);
29511 + atomic_inc_unchecked(&cm_nodes_destroyed);
29512 nesqp = cm_node->nesqp;
29513 if (nesqp) {
29514 nesqp->cm_node = NULL;
29515 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
29516
29517 static void drop_packet(struct sk_buff *skb)
29518 {
29519 - atomic_inc(&cm_accel_dropped_pkts);
29520 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
29521 dev_kfree_skb_any(skb);
29522 }
29523
29524 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
29525
29526 int reset = 0; /* whether to send reset in case of err.. */
29527 int passive_state;
29528 - atomic_inc(&cm_resets_recvd);
29529 + atomic_inc_unchecked(&cm_resets_recvd);
29530 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
29531 " refcnt=%d\n", cm_node, cm_node->state,
29532 atomic_read(&cm_node->ref_count));
29533 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
29534 rem_ref_cm_node(cm_node->cm_core, cm_node);
29535 return NULL;
29536 }
29537 - atomic_inc(&cm_loopbacks);
29538 + atomic_inc_unchecked(&cm_loopbacks);
29539 loopbackremotenode->loopbackpartner = cm_node;
29540 loopbackremotenode->tcp_cntxt.rcv_wscale =
29541 NES_CM_DEFAULT_RCV_WND_SCALE;
29542 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
29543 add_ref_cm_node(cm_node);
29544 } else if (cm_node->state == NES_CM_STATE_TSA) {
29545 rem_ref_cm_node(cm_core, cm_node);
29546 - atomic_inc(&cm_accel_dropped_pkts);
29547 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
29548 dev_kfree_skb_any(skb);
29549 break;
29550 }
29551 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
29552
29553 if ((cm_id) && (cm_id->event_handler)) {
29554 if (issue_disconn) {
29555 - atomic_inc(&cm_disconnects);
29556 + atomic_inc_unchecked(&cm_disconnects);
29557 cm_event.event = IW_CM_EVENT_DISCONNECT;
29558 cm_event.status = disconn_status;
29559 cm_event.local_addr = cm_id->local_addr;
29560 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
29561 }
29562
29563 if (issue_close) {
29564 - atomic_inc(&cm_closes);
29565 + atomic_inc_unchecked(&cm_closes);
29566 nes_disconnect(nesqp, 1);
29567
29568 cm_id->provider_data = nesqp;
29569 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
29570
29571 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
29572 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
29573 - atomic_inc(&cm_accepts);
29574 + atomic_inc_unchecked(&cm_accepts);
29575
29576 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
29577 atomic_read(&nesvnic->netdev->refcnt));
29578 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
29579
29580 struct nes_cm_core *cm_core;
29581
29582 - atomic_inc(&cm_rejects);
29583 + atomic_inc_unchecked(&cm_rejects);
29584 cm_node = (struct nes_cm_node *) cm_id->provider_data;
29585 loopback = cm_node->loopbackpartner;
29586 cm_core = cm_node->cm_core;
29587 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
29588 ntohl(cm_id->local_addr.sin_addr.s_addr),
29589 ntohs(cm_id->local_addr.sin_port));
29590
29591 - atomic_inc(&cm_connects);
29592 + atomic_inc_unchecked(&cm_connects);
29593 nesqp->active_conn = 1;
29594
29595 /* cache the cm_id in the qp */
29596 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
29597 if (nesqp->destroyed) {
29598 return;
29599 }
29600 - atomic_inc(&cm_connecteds);
29601 + atomic_inc_unchecked(&cm_connecteds);
29602 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
29603 " local port 0x%04X. jiffies = %lu.\n",
29604 nesqp->hwqp.qp_id,
29605 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
29606
29607 ret = cm_id->event_handler(cm_id, &cm_event);
29608 cm_id->add_ref(cm_id);
29609 - atomic_inc(&cm_closes);
29610 + atomic_inc_unchecked(&cm_closes);
29611 cm_event.event = IW_CM_EVENT_CLOSE;
29612 cm_event.status = IW_CM_EVENT_STATUS_OK;
29613 cm_event.provider_data = cm_id->provider_data;
29614 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
29615 return;
29616 cm_id = cm_node->cm_id;
29617
29618 - atomic_inc(&cm_connect_reqs);
29619 + atomic_inc_unchecked(&cm_connect_reqs);
29620 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
29621 cm_node, cm_id, jiffies);
29622
29623 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
29624 return;
29625 cm_id = cm_node->cm_id;
29626
29627 - atomic_inc(&cm_connect_reqs);
29628 + atomic_inc_unchecked(&cm_connect_reqs);
29629 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
29630 cm_node, cm_id, jiffies);
29631
29632 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h
29633 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
29634 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
29635 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
29636 extern unsigned int wqm_quanta;
29637 extern struct list_head nes_adapter_list;
29638
29639 -extern atomic_t cm_connects;
29640 -extern atomic_t cm_accepts;
29641 -extern atomic_t cm_disconnects;
29642 -extern atomic_t cm_closes;
29643 -extern atomic_t cm_connecteds;
29644 -extern atomic_t cm_connect_reqs;
29645 -extern atomic_t cm_rejects;
29646 -extern atomic_t mod_qp_timouts;
29647 -extern atomic_t qps_created;
29648 -extern atomic_t qps_destroyed;
29649 -extern atomic_t sw_qps_destroyed;
29650 +extern atomic_unchecked_t cm_connects;
29651 +extern atomic_unchecked_t cm_accepts;
29652 +extern atomic_unchecked_t cm_disconnects;
29653 +extern atomic_unchecked_t cm_closes;
29654 +extern atomic_unchecked_t cm_connecteds;
29655 +extern atomic_unchecked_t cm_connect_reqs;
29656 +extern atomic_unchecked_t cm_rejects;
29657 +extern atomic_unchecked_t mod_qp_timouts;
29658 +extern atomic_unchecked_t qps_created;
29659 +extern atomic_unchecked_t qps_destroyed;
29660 +extern atomic_unchecked_t sw_qps_destroyed;
29661 extern u32 mh_detected;
29662 extern u32 mh_pauses_sent;
29663 extern u32 cm_packets_sent;
29664 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
29665 extern u32 cm_listens_created;
29666 extern u32 cm_listens_destroyed;
29667 extern u32 cm_backlog_drops;
29668 -extern atomic_t cm_loopbacks;
29669 -extern atomic_t cm_nodes_created;
29670 -extern atomic_t cm_nodes_destroyed;
29671 -extern atomic_t cm_accel_dropped_pkts;
29672 -extern atomic_t cm_resets_recvd;
29673 +extern atomic_unchecked_t cm_loopbacks;
29674 +extern atomic_unchecked_t cm_nodes_created;
29675 +extern atomic_unchecked_t cm_nodes_destroyed;
29676 +extern atomic_unchecked_t cm_accel_dropped_pkts;
29677 +extern atomic_unchecked_t cm_resets_recvd;
29678
29679 extern u32 int_mod_timer_init;
29680 extern u32 int_mod_cq_depth_256;
29681 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c
29682 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
29683 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
29684 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
29685 target_stat_values[++index] = mh_detected;
29686 target_stat_values[++index] = mh_pauses_sent;
29687 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
29688 - target_stat_values[++index] = atomic_read(&cm_connects);
29689 - target_stat_values[++index] = atomic_read(&cm_accepts);
29690 - target_stat_values[++index] = atomic_read(&cm_disconnects);
29691 - target_stat_values[++index] = atomic_read(&cm_connecteds);
29692 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
29693 - target_stat_values[++index] = atomic_read(&cm_rejects);
29694 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
29695 - target_stat_values[++index] = atomic_read(&qps_created);
29696 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
29697 - target_stat_values[++index] = atomic_read(&qps_destroyed);
29698 - target_stat_values[++index] = atomic_read(&cm_closes);
29699 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
29700 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
29701 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
29702 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
29703 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
29704 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
29705 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
29706 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
29707 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
29708 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
29709 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
29710 target_stat_values[++index] = cm_packets_sent;
29711 target_stat_values[++index] = cm_packets_bounced;
29712 target_stat_values[++index] = cm_packets_created;
29713 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
29714 target_stat_values[++index] = cm_listens_created;
29715 target_stat_values[++index] = cm_listens_destroyed;
29716 target_stat_values[++index] = cm_backlog_drops;
29717 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
29718 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
29719 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
29720 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
29721 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
29722 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
29723 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
29724 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
29725 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
29726 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
29727 target_stat_values[++index] = int_mod_timer_init;
29728 target_stat_values[++index] = int_mod_cq_depth_1;
29729 target_stat_values[++index] = int_mod_cq_depth_4;
29730 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c
29731 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
29732 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
29733 @@ -45,9 +45,9 @@
29734
29735 #include <rdma/ib_umem.h>
29736
29737 -atomic_t mod_qp_timouts;
29738 -atomic_t qps_created;
29739 -atomic_t sw_qps_destroyed;
29740 +atomic_unchecked_t mod_qp_timouts;
29741 +atomic_unchecked_t qps_created;
29742 +atomic_unchecked_t sw_qps_destroyed;
29743
29744 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
29745
29746 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
29747 if (init_attr->create_flags)
29748 return ERR_PTR(-EINVAL);
29749
29750 - atomic_inc(&qps_created);
29751 + atomic_inc_unchecked(&qps_created);
29752 switch (init_attr->qp_type) {
29753 case IB_QPT_RC:
29754 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
29755 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
29756 struct iw_cm_event cm_event;
29757 int ret;
29758
29759 - atomic_inc(&sw_qps_destroyed);
29760 + atomic_inc_unchecked(&sw_qps_destroyed);
29761 nesqp->destroyed = 1;
29762
29763 /* Blow away the connection if it exists. */
29764 diff -urNp linux-2.6.32.42/drivers/input/gameport/gameport.c linux-2.6.32.42/drivers/input/gameport/gameport.c
29765 --- linux-2.6.32.42/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
29766 +++ linux-2.6.32.42/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
29767 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
29768 */
29769 static void gameport_init_port(struct gameport *gameport)
29770 {
29771 - static atomic_t gameport_no = ATOMIC_INIT(0);
29772 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
29773
29774 __module_get(THIS_MODULE);
29775
29776 mutex_init(&gameport->drv_mutex);
29777 device_initialize(&gameport->dev);
29778 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
29779 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
29780 gameport->dev.bus = &gameport_bus;
29781 gameport->dev.release = gameport_release_port;
29782 if (gameport->parent)
29783 diff -urNp linux-2.6.32.42/drivers/input/input.c linux-2.6.32.42/drivers/input/input.c
29784 --- linux-2.6.32.42/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
29785 +++ linux-2.6.32.42/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
29786 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
29787 */
29788 int input_register_device(struct input_dev *dev)
29789 {
29790 - static atomic_t input_no = ATOMIC_INIT(0);
29791 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
29792 struct input_handler *handler;
29793 const char *path;
29794 int error;
29795 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
29796 dev->setkeycode = input_default_setkeycode;
29797
29798 dev_set_name(&dev->dev, "input%ld",
29799 - (unsigned long) atomic_inc_return(&input_no) - 1);
29800 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
29801
29802 error = device_add(&dev->dev);
29803 if (error)
29804 diff -urNp linux-2.6.32.42/drivers/input/joystick/sidewinder.c linux-2.6.32.42/drivers/input/joystick/sidewinder.c
29805 --- linux-2.6.32.42/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
29806 +++ linux-2.6.32.42/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
29807 @@ -30,6 +30,7 @@
29808 #include <linux/kernel.h>
29809 #include <linux/module.h>
29810 #include <linux/slab.h>
29811 +#include <linux/sched.h>
29812 #include <linux/init.h>
29813 #include <linux/input.h>
29814 #include <linux/gameport.h>
29815 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
29816 unsigned char buf[SW_LENGTH];
29817 int i;
29818
29819 + pax_track_stack();
29820 +
29821 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
29822
29823 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
29824 diff -urNp linux-2.6.32.42/drivers/input/joystick/xpad.c linux-2.6.32.42/drivers/input/joystick/xpad.c
29825 --- linux-2.6.32.42/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
29826 +++ linux-2.6.32.42/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
29827 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
29828
29829 static int xpad_led_probe(struct usb_xpad *xpad)
29830 {
29831 - static atomic_t led_seq = ATOMIC_INIT(0);
29832 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
29833 long led_no;
29834 struct xpad_led *led;
29835 struct led_classdev *led_cdev;
29836 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
29837 if (!led)
29838 return -ENOMEM;
29839
29840 - led_no = (long)atomic_inc_return(&led_seq) - 1;
29841 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
29842
29843 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
29844 led->xpad = xpad;
29845 diff -urNp linux-2.6.32.42/drivers/input/serio/serio.c linux-2.6.32.42/drivers/input/serio/serio.c
29846 --- linux-2.6.32.42/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
29847 +++ linux-2.6.32.42/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
29848 @@ -527,7 +527,7 @@ static void serio_release_port(struct de
29849 */
29850 static void serio_init_port(struct serio *serio)
29851 {
29852 - static atomic_t serio_no = ATOMIC_INIT(0);
29853 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
29854
29855 __module_get(THIS_MODULE);
29856
29857 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio
29858 mutex_init(&serio->drv_mutex);
29859 device_initialize(&serio->dev);
29860 dev_set_name(&serio->dev, "serio%ld",
29861 - (long)atomic_inc_return(&serio_no) - 1);
29862 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
29863 serio->dev.bus = &serio_bus;
29864 serio->dev.release = serio_release_port;
29865 if (serio->parent) {
29866 diff -urNp linux-2.6.32.42/drivers/isdn/gigaset/common.c linux-2.6.32.42/drivers/isdn/gigaset/common.c
29867 --- linux-2.6.32.42/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
29868 +++ linux-2.6.32.42/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
29869 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
29870 cs->commands_pending = 0;
29871 cs->cur_at_seq = 0;
29872 cs->gotfwver = -1;
29873 - cs->open_count = 0;
29874 + local_set(&cs->open_count, 0);
29875 cs->dev = NULL;
29876 cs->tty = NULL;
29877 cs->tty_dev = NULL;
29878 diff -urNp linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h
29879 --- linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
29880 +++ linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
29881 @@ -34,6 +34,7 @@
29882 #include <linux/tty_driver.h>
29883 #include <linux/list.h>
29884 #include <asm/atomic.h>
29885 +#include <asm/local.h>
29886
29887 #define GIG_VERSION {0,5,0,0}
29888 #define GIG_COMPAT {0,4,0,0}
29889 @@ -446,7 +447,7 @@ struct cardstate {
29890 spinlock_t cmdlock;
29891 unsigned curlen, cmdbytes;
29892
29893 - unsigned open_count;
29894 + local_t open_count;
29895 struct tty_struct *tty;
29896 struct tasklet_struct if_wake_tasklet;
29897 unsigned control_state;
29898 diff -urNp linux-2.6.32.42/drivers/isdn/gigaset/interface.c linux-2.6.32.42/drivers/isdn/gigaset/interface.c
29899 --- linux-2.6.32.42/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
29900 +++ linux-2.6.32.42/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
29901 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
29902 return -ERESTARTSYS; // FIXME -EINTR?
29903 tty->driver_data = cs;
29904
29905 - ++cs->open_count;
29906 -
29907 - if (cs->open_count == 1) {
29908 + if (local_inc_return(&cs->open_count) == 1) {
29909 spin_lock_irqsave(&cs->lock, flags);
29910 cs->tty = tty;
29911 spin_unlock_irqrestore(&cs->lock, flags);
29912 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
29913
29914 if (!cs->connected)
29915 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29916 - else if (!cs->open_count)
29917 + else if (!local_read(&cs->open_count))
29918 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29919 else {
29920 - if (!--cs->open_count) {
29921 + if (!local_dec_return(&cs->open_count)) {
29922 spin_lock_irqsave(&cs->lock, flags);
29923 cs->tty = NULL;
29924 spin_unlock_irqrestore(&cs->lock, flags);
29925 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
29926 if (!cs->connected) {
29927 gig_dbg(DEBUG_IF, "not connected");
29928 retval = -ENODEV;
29929 - } else if (!cs->open_count)
29930 + } else if (!local_read(&cs->open_count))
29931 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29932 else {
29933 retval = 0;
29934 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
29935 if (!cs->connected) {
29936 gig_dbg(DEBUG_IF, "not connected");
29937 retval = -ENODEV;
29938 - } else if (!cs->open_count)
29939 + } else if (!local_read(&cs->open_count))
29940 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29941 else if (cs->mstate != MS_LOCKED) {
29942 dev_warn(cs->dev, "can't write to unlocked device\n");
29943 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
29944 if (!cs->connected) {
29945 gig_dbg(DEBUG_IF, "not connected");
29946 retval = -ENODEV;
29947 - } else if (!cs->open_count)
29948 + } else if (!local_read(&cs->open_count))
29949 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29950 else if (cs->mstate != MS_LOCKED) {
29951 dev_warn(cs->dev, "can't write to unlocked device\n");
29952 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
29953
29954 if (!cs->connected)
29955 gig_dbg(DEBUG_IF, "not connected");
29956 - else if (!cs->open_count)
29957 + else if (!local_read(&cs->open_count))
29958 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29959 else if (cs->mstate != MS_LOCKED)
29960 dev_warn(cs->dev, "can't write to unlocked device\n");
29961 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
29962
29963 if (!cs->connected)
29964 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29965 - else if (!cs->open_count)
29966 + else if (!local_read(&cs->open_count))
29967 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29968 else {
29969 //FIXME
29970 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
29971
29972 if (!cs->connected)
29973 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29974 - else if (!cs->open_count)
29975 + else if (!local_read(&cs->open_count))
29976 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29977 else {
29978 //FIXME
29979 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
29980 goto out;
29981 }
29982
29983 - if (!cs->open_count) {
29984 + if (!local_read(&cs->open_count)) {
29985 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29986 goto out;
29987 }
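
The `open_count` conversion above replaces an unlocked `++`/`--` pair with `local_t` operations, so the increment and the `== 1` (or `== 0`) test become a single atomic step and racing open/close calls cannot both take the first-open or last-close path. A userspace analogue of the same shape, with C11 atomics standing in for the kernel's `local_*` helpers:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long open_count;          /* zero-initialised, like the cardstate field */

static void demo_open(void)
{
        if (atomic_fetch_add(&open_count, 1) + 1 == 1)   /* ~ local_inc_return() == 1 */
                puts("first open: bind the tty");
}

static void demo_close(void)
{
        if (atomic_fetch_sub(&open_count, 1) - 1 == 0)   /* ~ !local_dec_return() */
                puts("last close: release the tty");
}

int main(void)
{
        demo_open();
        demo_open();
        demo_close();
        demo_close();
        return 0;
}
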
29988 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c
29989 --- linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
29990 +++ linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
29991 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
29992 }
29993 if (left) {
29994 if (t4file->user) {
29995 - if (copy_from_user(buf, dp, left))
29996 + if (left > sizeof buf || copy_from_user(buf, dp, left))
29997 return -EFAULT;
29998 } else {
29999 memcpy(buf, dp, left);
30000 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
30001 }
30002 if (left) {
30003 if (config->user) {
30004 - if (copy_from_user(buf, dp, left))
30005 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30006 return -EFAULT;
30007 } else {
30008 memcpy(buf, dp, left);
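
The extra `left > sizeof buf` test above matters because `left` derives from a user-supplied download request: without the bound, `copy_from_user()` could be asked to overrun the fixed on-stack buffer. A userspace sketch of the bounded-copy shape, with `memcpy()` standing in for `copy_from_user()` and hypothetical demo names:

#include <errno.h>
#include <string.h>

static int demo_load_chunk(unsigned char *card_mem, const unsigned char *req,
                           size_t left)
{
        unsigned char buf[256];                 /* fixed scratch buffer */

        if (left > sizeof(buf))                 /* reject oversized requests */
                return -EFAULT;
        memcpy(buf, req, left);                 /* stands in for copy_from_user() */
        memcpy(card_mem, buf, left);            /* stands in for writing to the card */
        return 0;
}

int main(void)
{
        unsigned char card[4096] = { 0 };
        unsigned char request[300] = { 0 };

        /* 300 bytes exceeds the scratch buffer, so the call must fail */
        return demo_load_chunk(card, request, sizeof(request)) == -EFAULT ? 0 : 1;
}
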
30009 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c
30010 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
30011 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
30012 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
30013 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
30014 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
30015
30016 + pax_track_stack();
30017
30018 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
30019 {
30020 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c
30021 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
30022 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
30023 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
30024 IDI_SYNC_REQ req;
30025 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30026
30027 + pax_track_stack();
30028 +
30029 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30030
30031 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30032 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c
30033 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
30034 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
30035 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
30036 IDI_SYNC_REQ req;
30037 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30038
30039 + pax_track_stack();
30040 +
30041 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30042
30043 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30044 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c
30045 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
30046 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
30047 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
30048 IDI_SYNC_REQ req;
30049 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30050
30051 + pax_track_stack();
30052 +
30053 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30054
30055 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30056 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c
30057 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
30058 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
30059 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
30060 IDI_SYNC_REQ req;
30061 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30062
30063 + pax_track_stack();
30064 +
30065 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30066
30067 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30068 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c
30069 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
30070 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
30071 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
30072 dword d;
30073 word w;
30074
30075 + pax_track_stack();
30076 +
30077 a = plci->adapter;
30078 Id = ((word)plci->Id<<8)|a->Id;
30079 PUT_WORD(&SS_Ind[4],0x0000);
30080 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
30081 word j, n, w;
30082 dword d;
30083
30084 + pax_track_stack();
30085 +
30086
30087 for(i=0;i<8;i++) bp_parms[i].length = 0;
30088 for(i=0;i<2;i++) global_config[i].length = 0;
30089 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
30090 const byte llc3[] = {4,3,2,2,6,6,0};
30091 const byte header[] = {0,2,3,3,0,0,0};
30092
30093 + pax_track_stack();
30094 +
30095 for(i=0;i<8;i++) bp_parms[i].length = 0;
30096 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
30097 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
30098 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
30099 word appl_number_group_type[MAX_APPL];
30100 PLCI *auxplci;
30101
30102 + pax_track_stack();
30103 +
30104 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
30105
30106 if(!a->group_optimization_enabled)
30107 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c
30108 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
30109 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
30110 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
30111 IDI_SYNC_REQ req;
30112 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30113
30114 + pax_track_stack();
30115 +
30116 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30117
30118 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30119 diff -urNp linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c
30120 --- linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
30121 +++ linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
30122 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
30123 } iocpar;
30124 void __user *argp = (void __user *)arg;
30125
30126 + pax_track_stack();
30127 +
30128 #define name iocpar.name
30129 #define bname iocpar.bname
30130 #define iocts iocpar.iocts
30131 diff -urNp linux-2.6.32.42/drivers/isdn/icn/icn.c linux-2.6.32.42/drivers/isdn/icn/icn.c
30132 --- linux-2.6.32.42/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
30133 +++ linux-2.6.32.42/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
30134 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
30135 if (count > len)
30136 count = len;
30137 if (user) {
30138 - if (copy_from_user(msg, buf, count))
30139 + if (count > sizeof msg || copy_from_user(msg, buf, count))
30140 return -EFAULT;
30141 } else
30142 memcpy(msg, buf, count);
30143 diff -urNp linux-2.6.32.42/drivers/isdn/mISDN/socket.c linux-2.6.32.42/drivers/isdn/mISDN/socket.c
30144 --- linux-2.6.32.42/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
30145 +++ linux-2.6.32.42/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
30146 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
30147 if (dev) {
30148 struct mISDN_devinfo di;
30149
30150 + memset(&di, 0, sizeof(di));
30151 di.id = dev->id;
30152 di.Dprotocols = dev->Dprotocols;
30153 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30154 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
30155 if (dev) {
30156 struct mISDN_devinfo di;
30157
30158 + memset(&di, 0, sizeof(di));
30159 di.id = dev->id;
30160 di.Dprotocols = dev->Dprotocols;
30161 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30162 diff -urNp linux-2.6.32.42/drivers/isdn/sc/interrupt.c linux-2.6.32.42/drivers/isdn/sc/interrupt.c
30163 --- linux-2.6.32.42/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
30164 +++ linux-2.6.32.42/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
30165 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
30166 }
30167 else if(callid>=0x0000 && callid<=0x7FFF)
30168 {
30169 + int len;
30170 +
30171 pr_debug("%s: Got Incoming Call\n",
30172 sc_adapter[card]->devicename);
30173 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
30174 - strcpy(setup.eazmsn,
30175 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
30176 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
30177 + sizeof(setup.phone));
30178 + if (len >= sizeof(setup.phone))
30179 + continue;
30180 + len = strlcpy(setup.eazmsn,
30181 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30182 + sizeof(setup.eazmsn));
30183 + if (len >= sizeof(setup.eazmsn))
30184 + continue;
30185 setup.si1 = 7;
30186 setup.si2 = 0;
30187 setup.plan = 0;
30188 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
30189 * Handle a GetMyNumber Rsp
30190 */
30191 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
30192 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
30193 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30194 + rcvmsg.msg_data.byte_array,
30195 + sizeof(rcvmsg.msg_data.byte_array));
30196 continue;
30197 }
30198
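
The hunk above swaps unbounded `strcpy()` calls for bounded copies whose return value exposes truncation: `strlcpy()` returns the length of the source string, so `len >= sizeof(dst)` means the incoming number did not fit and the message is skipped instead of overflowing the field. A stand-alone sketch with a local helper that follows the same return convention:

#include <stddef.h>
#include <string.h>

/* local stand-in with strlcpy() semantics: copy at most size-1 bytes,
 * always NUL-terminate, return the full source length */
static size_t demo_strlcpy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = len >= size ? size - 1 : len;

                memcpy(dst, src, n);
                dst[n] = '\0';
        }
        return len;                     /* >= size means the copy was truncated */
}

int main(void)
{
        char phone[32];
        size_t len = demo_strlcpy(phone,
                                  "0123456789012345678901234567890123456789",
                                  sizeof(phone));

        return len >= sizeof(phone) ? 0 /* drop the oversized message */ : 1;
}
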
30199 diff -urNp linux-2.6.32.42/drivers/lguest/core.c linux-2.6.32.42/drivers/lguest/core.c
30200 --- linux-2.6.32.42/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
30201 +++ linux-2.6.32.42/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
30202 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
30203 * it's worked so far. The end address needs +1 because __get_vm_area
30204 * allocates an extra guard page, so we need space for that.
30205 */
30206 +
30207 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30208 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30209 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30210 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30211 +#else
30212 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30213 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30214 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30215 +#endif
30216 +
30217 if (!switcher_vma) {
30218 err = -ENOMEM;
30219 printk("lguest: could not map switcher pages high\n");
30220 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
30221 * Now the Switcher is mapped at the right address, we can't fail!
30222 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
30223 */
30224 - memcpy(switcher_vma->addr, start_switcher_text,
30225 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30226 end_switcher_text - start_switcher_text);
30227
30228 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30229 diff -urNp linux-2.6.32.42/drivers/lguest/x86/core.c linux-2.6.32.42/drivers/lguest/x86/core.c
30230 --- linux-2.6.32.42/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
30231 +++ linux-2.6.32.42/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
30232 @@ -59,7 +59,7 @@ static struct {
30233 /* Offset from where switcher.S was compiled to where we've copied it */
30234 static unsigned long switcher_offset(void)
30235 {
30236 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30237 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30238 }
30239
30240 /* This cpu's struct lguest_pages. */
30241 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
30242 * These copies are pretty cheap, so we do them unconditionally: */
30243 /* Save the current Host top-level page directory.
30244 */
30245 +
30246 +#ifdef CONFIG_PAX_PER_CPU_PGD
30247 + pages->state.host_cr3 = read_cr3();
30248 +#else
30249 pages->state.host_cr3 = __pa(current->mm->pgd);
30250 +#endif
30251 +
30252 /*
30253 * Set up the Guest's page tables to see this CPU's pages (and no
30254 * other CPU's pages).
30255 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
30256 * compiled-in switcher code and the high-mapped copy we just made.
30257 */
30258 for (i = 0; i < IDT_ENTRIES; i++)
30259 - default_idt_entries[i] += switcher_offset();
30260 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30261
30262 /*
30263 * Set up the Switcher's per-cpu areas.
30264 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
30265 * it will be undisturbed when we switch. To change %cs and jump we
30266 * need this structure to feed to Intel's "lcall" instruction.
30267 */
30268 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30269 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30270 lguest_entry.segment = LGUEST_CS;
30271
30272 /*
30273 diff -urNp linux-2.6.32.42/drivers/lguest/x86/switcher_32.S linux-2.6.32.42/drivers/lguest/x86/switcher_32.S
30274 --- linux-2.6.32.42/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
30275 +++ linux-2.6.32.42/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
30276 @@ -87,6 +87,7 @@
30277 #include <asm/page.h>
30278 #include <asm/segment.h>
30279 #include <asm/lguest.h>
30280 +#include <asm/processor-flags.h>
30281
30282 // We mark the start of the code to copy
30283 // It's placed in .text tho it's never run here
30284 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30285 // Changes type when we load it: damn Intel!
30286 // For after we switch over our page tables
30287 // That entry will be read-only: we'd crash.
30288 +
30289 +#ifdef CONFIG_PAX_KERNEXEC
30290 + mov %cr0, %edx
30291 + xor $X86_CR0_WP, %edx
30292 + mov %edx, %cr0
30293 +#endif
30294 +
30295 movl $(GDT_ENTRY_TSS*8), %edx
30296 ltr %dx
30297
30298 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30299 // Let's clear it again for our return.
30300 // The GDT descriptor of the Host
30301 // Points to the table after two "size" bytes
30302 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30303 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30304 // Clear "used" from type field (byte 5, bit 2)
30305 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30306 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30307 +
30308 +#ifdef CONFIG_PAX_KERNEXEC
30309 + mov %cr0, %eax
30310 + xor $X86_CR0_WP, %eax
30311 + mov %eax, %cr0
30312 +#endif
30313
30314 // Once our page table's switched, the Guest is live!
30315 // The Host fades as we run this final step.
30316 @@ -295,13 +309,12 @@ deliver_to_host:
30317 // I consulted gcc, and it gave
30318 // These instructions, which I gladly credit:
30319 leal (%edx,%ebx,8), %eax
30320 - movzwl (%eax),%edx
30321 - movl 4(%eax), %eax
30322 - xorw %ax, %ax
30323 - orl %eax, %edx
30324 + movl 4(%eax), %edx
30325 + movw (%eax), %dx
30326 // Now the address of the handler's in %edx
30327 // We call it now: its "iret" drops us home.
30328 - jmp *%edx
30329 + ljmp $__KERNEL_CS, $1f
30330 +1: jmp *%edx
30331
30332 // Every interrupt can come to us here
30333 // But we must truly tell each apart.
30334 diff -urNp linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c
30335 --- linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
30336 +++ linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
30337 @@ -15,7 +15,7 @@
30338
30339 #define MAX_PMU_LEVEL 0xFF
30340
30341 -static struct backlight_ops pmu_backlight_data;
30342 +static const struct backlight_ops pmu_backlight_data;
30343 static DEFINE_SPINLOCK(pmu_backlight_lock);
30344 static int sleeping, uses_pmu_bl;
30345 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
30346 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
30347 return bd->props.brightness;
30348 }
30349
30350 -static struct backlight_ops pmu_backlight_data = {
30351 +static const struct backlight_ops pmu_backlight_data = {
30352 .get_brightness = pmu_backlight_get_brightness,
30353 .update_status = pmu_backlight_update_status,
30354
30355 diff -urNp linux-2.6.32.42/drivers/macintosh/via-pmu.c linux-2.6.32.42/drivers/macintosh/via-pmu.c
30356 --- linux-2.6.32.42/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
30357 +++ linux-2.6.32.42/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
30358 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
30359 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
30360 }
30361
30362 -static struct platform_suspend_ops pmu_pm_ops = {
30363 +static const struct platform_suspend_ops pmu_pm_ops = {
30364 .enter = powerbook_sleep,
30365 .valid = pmu_sleep_valid,
30366 };
30367 diff -urNp linux-2.6.32.42/drivers/md/dm.c linux-2.6.32.42/drivers/md/dm.c
30368 --- linux-2.6.32.42/drivers/md/dm.c 2011-03-27 14:31:47.000000000 -0400
30369 +++ linux-2.6.32.42/drivers/md/dm.c 2011-05-04 17:56:28.000000000 -0400
30370 @@ -163,9 +163,9 @@ struct mapped_device {
30371 /*
30372 * Event handling.
30373 */
30374 - atomic_t event_nr;
30375 + atomic_unchecked_t event_nr;
30376 wait_queue_head_t eventq;
30377 - atomic_t uevent_seq;
30378 + atomic_unchecked_t uevent_seq;
30379 struct list_head uevent_list;
30380 spinlock_t uevent_lock; /* Protect access to uevent_list */
30381
30382 @@ -1770,8 +1770,8 @@ static struct mapped_device *alloc_dev(i
30383 rwlock_init(&md->map_lock);
30384 atomic_set(&md->holders, 1);
30385 atomic_set(&md->open_count, 0);
30386 - atomic_set(&md->event_nr, 0);
30387 - atomic_set(&md->uevent_seq, 0);
30388 + atomic_set_unchecked(&md->event_nr, 0);
30389 + atomic_set_unchecked(&md->uevent_seq, 0);
30390 INIT_LIST_HEAD(&md->uevent_list);
30391 spin_lock_init(&md->uevent_lock);
30392
30393 @@ -1921,7 +1921,7 @@ static void event_callback(void *context
30394
30395 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
30396
30397 - atomic_inc(&md->event_nr);
30398 + atomic_inc_unchecked(&md->event_nr);
30399 wake_up(&md->eventq);
30400 }
30401
30402 @@ -2556,18 +2556,18 @@ void dm_kobject_uevent(struct mapped_dev
30403
30404 uint32_t dm_next_uevent_seq(struct mapped_device *md)
30405 {
30406 - return atomic_add_return(1, &md->uevent_seq);
30407 + return atomic_add_return_unchecked(1, &md->uevent_seq);
30408 }
30409
30410 uint32_t dm_get_event_nr(struct mapped_device *md)
30411 {
30412 - return atomic_read(&md->event_nr);
30413 + return atomic_read_unchecked(&md->event_nr);
30414 }
30415
30416 int dm_wait_event(struct mapped_device *md, int event_nr)
30417 {
30418 return wait_event_interruptible(md->eventq,
30419 - (event_nr != atomic_read(&md->event_nr)));
30420 + (event_nr != atomic_read_unchecked(&md->event_nr)));
30421 }
30422
30423 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
30424 diff -urNp linux-2.6.32.42/drivers/md/dm-ioctl.c linux-2.6.32.42/drivers/md/dm-ioctl.c
30425 --- linux-2.6.32.42/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
30426 +++ linux-2.6.32.42/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
30427 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
30428 cmd == DM_LIST_VERSIONS_CMD)
30429 return 0;
30430
30431 - if ((cmd == DM_DEV_CREATE_CMD)) {
30432 + if (cmd == DM_DEV_CREATE_CMD) {
30433 if (!*param->name) {
30434 DMWARN("name not supplied when creating device");
30435 return -EINVAL;
30436 diff -urNp linux-2.6.32.42/drivers/md/dm-raid1.c linux-2.6.32.42/drivers/md/dm-raid1.c
30437 --- linux-2.6.32.42/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
30438 +++ linux-2.6.32.42/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
30439 @@ -41,7 +41,7 @@ enum dm_raid1_error {
30440
30441 struct mirror {
30442 struct mirror_set *ms;
30443 - atomic_t error_count;
30444 + atomic_unchecked_t error_count;
30445 unsigned long error_type;
30446 struct dm_dev *dev;
30447 sector_t offset;
30448 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
30449 * simple way to tell if a device has encountered
30450 * errors.
30451 */
30452 - atomic_inc(&m->error_count);
30453 + atomic_inc_unchecked(&m->error_count);
30454
30455 if (test_and_set_bit(error_type, &m->error_type))
30456 return;
30457 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
30458 }
30459
30460 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
30461 - if (!atomic_read(&new->error_count)) {
30462 + if (!atomic_read_unchecked(&new->error_count)) {
30463 set_default_mirror(new);
30464 break;
30465 }
30466 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
30467 struct mirror *m = get_default_mirror(ms);
30468
30469 do {
30470 - if (likely(!atomic_read(&m->error_count)))
30471 + if (likely(!atomic_read_unchecked(&m->error_count)))
30472 return m;
30473
30474 if (m-- == ms->mirror)
30475 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
30476 {
30477 struct mirror *default_mirror = get_default_mirror(m->ms);
30478
30479 - return !atomic_read(&default_mirror->error_count);
30480 + return !atomic_read_unchecked(&default_mirror->error_count);
30481 }
30482
30483 static int mirror_available(struct mirror_set *ms, struct bio *bio)
30484 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
30485 */
30486 if (likely(region_in_sync(ms, region, 1)))
30487 m = choose_mirror(ms, bio->bi_sector);
30488 - else if (m && atomic_read(&m->error_count))
30489 + else if (m && atomic_read_unchecked(&m->error_count))
30490 m = NULL;
30491
30492 if (likely(m))
30493 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
30494 }
30495
30496 ms->mirror[mirror].ms = ms;
30497 - atomic_set(&(ms->mirror[mirror].error_count), 0);
30498 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
30499 ms->mirror[mirror].error_type = 0;
30500 ms->mirror[mirror].offset = offset;
30501
30502 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
30503 */
30504 static char device_status_char(struct mirror *m)
30505 {
30506 - if (!atomic_read(&(m->error_count)))
30507 + if (!atomic_read_unchecked(&(m->error_count)))
30508 return 'A';
30509
30510 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
30511 diff -urNp linux-2.6.32.42/drivers/md/dm-stripe.c linux-2.6.32.42/drivers/md/dm-stripe.c
30512 --- linux-2.6.32.42/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
30513 +++ linux-2.6.32.42/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
30514 @@ -20,7 +20,7 @@ struct stripe {
30515 struct dm_dev *dev;
30516 sector_t physical_start;
30517
30518 - atomic_t error_count;
30519 + atomic_unchecked_t error_count;
30520 };
30521
30522 struct stripe_c {
30523 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
30524 kfree(sc);
30525 return r;
30526 }
30527 - atomic_set(&(sc->stripe[i].error_count), 0);
30528 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
30529 }
30530
30531 ti->private = sc;
30532 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
30533 DMEMIT("%d ", sc->stripes);
30534 for (i = 0; i < sc->stripes; i++) {
30535 DMEMIT("%s ", sc->stripe[i].dev->name);
30536 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
30537 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
30538 'D' : 'A';
30539 }
30540 buffer[i] = '\0';
30541 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
30542 */
30543 for (i = 0; i < sc->stripes; i++)
30544 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
30545 - atomic_inc(&(sc->stripe[i].error_count));
30546 - if (atomic_read(&(sc->stripe[i].error_count)) <
30547 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
30548 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
30549 DM_IO_ERROR_THRESHOLD)
30550 queue_work(kstriped, &sc->kstriped_ws);
30551 }
30552 diff -urNp linux-2.6.32.42/drivers/md/dm-sysfs.c linux-2.6.32.42/drivers/md/dm-sysfs.c
30553 --- linux-2.6.32.42/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
30554 +++ linux-2.6.32.42/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
30555 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
30556 NULL,
30557 };
30558
30559 -static struct sysfs_ops dm_sysfs_ops = {
30560 +static const struct sysfs_ops dm_sysfs_ops = {
30561 .show = dm_attr_show,
30562 };
30563
30564 diff -urNp linux-2.6.32.42/drivers/md/dm-table.c linux-2.6.32.42/drivers/md/dm-table.c
30565 --- linux-2.6.32.42/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
30566 +++ linux-2.6.32.42/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
30567 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
30568 if (!dev_size)
30569 return 0;
30570
30571 - if ((start >= dev_size) || (start + len > dev_size)) {
30572 + if ((start >= dev_size) || (len > dev_size - start)) {
30573 DMWARN("%s: %s too small for target: "
30574 "start=%llu, len=%llu, dev_size=%llu",
30575 dm_device_name(ti->table->md), bdevname(bdev, b),
30576 diff -urNp linux-2.6.32.42/drivers/md/md.c linux-2.6.32.42/drivers/md/md.c
30577 --- linux-2.6.32.42/drivers/md/md.c 2011-06-25 12:55:34.000000000 -0400
30578 +++ linux-2.6.32.42/drivers/md/md.c 2011-06-25 12:56:37.000000000 -0400
30579 @@ -153,10 +153,10 @@ static int start_readonly;
30580 * start build, activate spare
30581 */
30582 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
30583 -static atomic_t md_event_count;
30584 +static atomic_unchecked_t md_event_count;
30585 void md_new_event(mddev_t *mddev)
30586 {
30587 - atomic_inc(&md_event_count);
30588 + atomic_inc_unchecked(&md_event_count);
30589 wake_up(&md_event_waiters);
30590 }
30591 EXPORT_SYMBOL_GPL(md_new_event);
30592 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
30593 */
30594 static void md_new_event_inintr(mddev_t *mddev)
30595 {
30596 - atomic_inc(&md_event_count);
30597 + atomic_inc_unchecked(&md_event_count);
30598 wake_up(&md_event_waiters);
30599 }
30600
30601 @@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
30602
30603 rdev->preferred_minor = 0xffff;
30604 rdev->data_offset = le64_to_cpu(sb->data_offset);
30605 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
30606 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
30607
30608 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
30609 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
30610 @@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
30611 else
30612 sb->resync_offset = cpu_to_le64(0);
30613
30614 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
30615 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
30616
30617 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
30618 sb->size = cpu_to_le64(mddev->dev_sectors);
30619 @@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
30620 static ssize_t
30621 errors_show(mdk_rdev_t *rdev, char *page)
30622 {
30623 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
30624 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
30625 }
30626
30627 static ssize_t
30628 @@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
30629 char *e;
30630 unsigned long n = simple_strtoul(buf, &e, 10);
30631 if (*buf && (*e == 0 || *e == '\n')) {
30632 - atomic_set(&rdev->corrected_errors, n);
30633 + atomic_set_unchecked(&rdev->corrected_errors, n);
30634 return len;
30635 }
30636 return -EINVAL;
30637 @@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
30638 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
30639 kfree(rdev);
30640 }
30641 -static struct sysfs_ops rdev_sysfs_ops = {
30642 +static const struct sysfs_ops rdev_sysfs_ops = {
30643 .show = rdev_attr_show,
30644 .store = rdev_attr_store,
30645 };
30646 @@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
30647 rdev->data_offset = 0;
30648 rdev->sb_events = 0;
30649 atomic_set(&rdev->nr_pending, 0);
30650 - atomic_set(&rdev->read_errors, 0);
30651 - atomic_set(&rdev->corrected_errors, 0);
30652 + atomic_set_unchecked(&rdev->read_errors, 0);
30653 + atomic_set_unchecked(&rdev->corrected_errors, 0);
30654
30655 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
30656 if (!size) {
30657 @@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
30658 kfree(mddev);
30659 }
30660
30661 -static struct sysfs_ops md_sysfs_ops = {
30662 +static const struct sysfs_ops md_sysfs_ops = {
30663 .show = md_attr_show,
30664 .store = md_attr_store,
30665 };
30666 @@ -4474,7 +4474,8 @@ out:
30667 err = 0;
30668 blk_integrity_unregister(disk);
30669 md_new_event(mddev);
30670 - sysfs_notify_dirent(mddev->sysfs_state);
30671 + if (mddev->sysfs_state)
30672 + sysfs_notify_dirent(mddev->sysfs_state);
30673 return err;
30674 }
30675
30676 @@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
30677
30678 spin_unlock(&pers_lock);
30679 seq_printf(seq, "\n");
30680 - mi->event = atomic_read(&md_event_count);
30681 + mi->event = atomic_read_unchecked(&md_event_count);
30682 return 0;
30683 }
30684 if (v == (void*)2) {
30685 @@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
30686 chunk_kb ? "KB" : "B");
30687 if (bitmap->file) {
30688 seq_printf(seq, ", file: ");
30689 - seq_path(seq, &bitmap->file->f_path, " \t\n");
30690 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
30691 }
30692
30693 seq_printf(seq, "\n");
30694 @@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
30695 else {
30696 struct seq_file *p = file->private_data;
30697 p->private = mi;
30698 - mi->event = atomic_read(&md_event_count);
30699 + mi->event = atomic_read_unchecked(&md_event_count);
30700 }
30701 return error;
30702 }
30703 @@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
30704 /* always allow read */
30705 mask = POLLIN | POLLRDNORM;
30706
30707 - if (mi->event != atomic_read(&md_event_count))
30708 + if (mi->event != atomic_read_unchecked(&md_event_count))
30709 mask |= POLLERR | POLLPRI;
30710 return mask;
30711 }
30712 @@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
30713 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
30714 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
30715 (int)part_stat_read(&disk->part0, sectors[1]) -
30716 - atomic_read(&disk->sync_io);
30717 + atomic_read_unchecked(&disk->sync_io);
30718 /* sync IO will cause sync_io to increase before the disk_stats
30719 * as sync_io is counted when a request starts, and
30720 * disk_stats is counted when it completes.
30721 diff -urNp linux-2.6.32.42/drivers/md/md.h linux-2.6.32.42/drivers/md/md.h
30722 --- linux-2.6.32.42/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
30723 +++ linux-2.6.32.42/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
30724 @@ -94,10 +94,10 @@ struct mdk_rdev_s
30725 * only maintained for arrays that
30726 * support hot removal
30727 */
30728 - atomic_t read_errors; /* number of consecutive read errors that
30729 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
30730 * we have tried to ignore.
30731 */
30732 - atomic_t corrected_errors; /* number of corrected read errors,
30733 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
30734 * for reporting to userspace and storing
30735 * in superblock.
30736 */
30737 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
30738
30739 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
30740 {
30741 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
30742 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
30743 }
30744
30745 struct mdk_personality
30746 diff -urNp linux-2.6.32.42/drivers/md/raid10.c linux-2.6.32.42/drivers/md/raid10.c
30747 --- linux-2.6.32.42/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
30748 +++ linux-2.6.32.42/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
30749 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
30750 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
30751 set_bit(R10BIO_Uptodate, &r10_bio->state);
30752 else {
30753 - atomic_add(r10_bio->sectors,
30754 + atomic_add_unchecked(r10_bio->sectors,
30755 &conf->mirrors[d].rdev->corrected_errors);
30756 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
30757 md_error(r10_bio->mddev,
30758 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
30759 test_bit(In_sync, &rdev->flags)) {
30760 atomic_inc(&rdev->nr_pending);
30761 rcu_read_unlock();
30762 - atomic_add(s, &rdev->corrected_errors);
30763 + atomic_add_unchecked(s, &rdev->corrected_errors);
30764 if (sync_page_io(rdev->bdev,
30765 r10_bio->devs[sl].addr +
30766 sect + rdev->data_offset,
30767 diff -urNp linux-2.6.32.42/drivers/md/raid1.c linux-2.6.32.42/drivers/md/raid1.c
30768 --- linux-2.6.32.42/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
30769 +++ linux-2.6.32.42/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
30770 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
30771 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
30772 continue;
30773 rdev = conf->mirrors[d].rdev;
30774 - atomic_add(s, &rdev->corrected_errors);
30775 + atomic_add_unchecked(s, &rdev->corrected_errors);
30776 if (sync_page_io(rdev->bdev,
30777 sect + rdev->data_offset,
30778 s<<9,
30779 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
30780 /* Well, this device is dead */
30781 md_error(mddev, rdev);
30782 else {
30783 - atomic_add(s, &rdev->corrected_errors);
30784 + atomic_add_unchecked(s, &rdev->corrected_errors);
30785 printk(KERN_INFO
30786 "raid1:%s: read error corrected "
30787 "(%d sectors at %llu on %s)\n",
30788 diff -urNp linux-2.6.32.42/drivers/md/raid5.c linux-2.6.32.42/drivers/md/raid5.c
30789 --- linux-2.6.32.42/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
30790 +++ linux-2.6.32.42/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
30791 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
30792 bi->bi_next = NULL;
30793 if ((rw & WRITE) &&
30794 test_bit(R5_ReWrite, &sh->dev[i].flags))
30795 - atomic_add(STRIPE_SECTORS,
30796 + atomic_add_unchecked(STRIPE_SECTORS,
30797 &rdev->corrected_errors);
30798 generic_make_request(bi);
30799 } else {
30800 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
30801 clear_bit(R5_ReadError, &sh->dev[i].flags);
30802 clear_bit(R5_ReWrite, &sh->dev[i].flags);
30803 }
30804 - if (atomic_read(&conf->disks[i].rdev->read_errors))
30805 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
30806 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
30807 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
30808 } else {
30809 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
30810 int retry = 0;
30811 rdev = conf->disks[i].rdev;
30812
30813 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
30814 - atomic_inc(&rdev->read_errors);
30815 + atomic_inc_unchecked(&rdev->read_errors);
30816 if (conf->mddev->degraded >= conf->max_degraded)
30817 printk_rl(KERN_WARNING
30818 "raid5:%s: read error not correctable "
30819 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
30820 (unsigned long long)(sh->sector
30821 + rdev->data_offset),
30822 bdn);
30823 - else if (atomic_read(&rdev->read_errors)
30824 + else if (atomic_read_unchecked(&rdev->read_errors)
30825 > conf->max_nr_stripes)
30826 printk(KERN_WARNING
30827 "raid5:%s: Too many read errors, failing device %s.\n",
30828 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
30829 sector_t r_sector;
30830 struct stripe_head sh2;
30831
30832 + pax_track_stack();
30833
30834 chunk_offset = sector_div(new_sector, sectors_per_chunk);
30835 stripe = new_sector;
30836 diff -urNp linux-2.6.32.42/drivers/media/common/saa7146_hlp.c linux-2.6.32.42/drivers/media/common/saa7146_hlp.c
30837 --- linux-2.6.32.42/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
30838 +++ linux-2.6.32.42/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
30839 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
30840
30841 int x[32], y[32], w[32], h[32];
30842
30843 + pax_track_stack();
30844 +
30845 /* clear out memory */
30846 memset(&line_list[0], 0x00, sizeof(u32)*32);
30847 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
30848 diff -urNp linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
30849 --- linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
30850 +++ linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
30851 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
30852 u8 buf[HOST_LINK_BUF_SIZE];
30853 int i;
30854
30855 + pax_track_stack();
30856 +
30857 dprintk("%s\n", __func__);
30858
30859 /* check if we have space for a link buf in the rx_buffer */
30860 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
30861 unsigned long timeout;
30862 int written;
30863
30864 + pax_track_stack();
30865 +
30866 dprintk("%s\n", __func__);
30867
30868 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
30869 diff -urNp linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c
30870 --- linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
30871 +++ linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c 2011-04-17 15:56:46.000000000 -0400
30872 @@ -191,6 +191,7 @@ int dvb_register_device(struct dvb_adapt
30873 const struct dvb_device *template, void *priv, int type)
30874 {
30875 struct dvb_device *dvbdev;
30876 + /* cannot be const */
30877 struct file_operations *dvbdevfops;
30878 struct device *clsdev;
30879 int minor;
30880 diff -urNp linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c
30881 --- linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
30882 +++ linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
30883 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
30884
30885 u8 buf[260];
30886
30887 + pax_track_stack();
30888 +
30889 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
30890 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
30891
30892 diff -urNp linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c
30893 --- linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
30894 +++ linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
30895 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
30896 u8 tudata[585];
30897 int i;
30898
30899 + pax_track_stack();
30900 +
30901 dprintk("Firmware is %zd bytes\n",fw->size);
30902
30903 /* Get eprom data */
30904 diff -urNp linux-2.6.32.42/drivers/media/radio/radio-cadet.c linux-2.6.32.42/drivers/media/radio/radio-cadet.c
30905 --- linux-2.6.32.42/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
30906 +++ linux-2.6.32.42/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
30907 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
30908 while (i < count && dev->rdsin != dev->rdsout)
30909 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
30910
30911 - if (copy_to_user(data, readbuf, i))
30912 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
30913 return -EFAULT;
30914 return i;
30915 }
30916 diff -urNp linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c
30917 --- linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
30918 +++ linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
30919 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
30920
30921 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
30922
30923 -static atomic_t cx18_instance = ATOMIC_INIT(0);
30924 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
30925
30926 /* Parameter declarations */
30927 static int cardtype[CX18_MAX_CARDS];
30928 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
30929 struct i2c_client c;
30930 u8 eedata[256];
30931
30932 + pax_track_stack();
30933 +
30934 memset(&c, 0, sizeof(c));
30935 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
30936 c.adapter = &cx->i2c_adap[0];
30937 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
30938 struct cx18 *cx;
30939
30940 /* FIXME - module parameter arrays constrain max instances */
30941 - i = atomic_inc_return(&cx18_instance) - 1;
30942 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
30943 if (i >= CX18_MAX_CARDS) {
30944 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
30945 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
30946 diff -urNp linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c
30947 --- linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
30948 +++ linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
30949 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
30950 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
30951
30952 /* ivtv instance counter */
30953 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
30954 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
30955
30956 /* Parameter declarations */
30957 static int cardtype[IVTV_MAX_CARDS];
30958 diff -urNp linux-2.6.32.42/drivers/media/video/omap24xxcam.c linux-2.6.32.42/drivers/media/video/omap24xxcam.c
30959 --- linux-2.6.32.42/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
30960 +++ linux-2.6.32.42/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
30961 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
30962 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
30963
30964 do_gettimeofday(&vb->ts);
30965 - vb->field_count = atomic_add_return(2, &fh->field_count);
30966 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
30967 if (csr & csr_error) {
30968 vb->state = VIDEOBUF_ERROR;
30969 if (!atomic_read(&fh->cam->in_reset)) {
30970 diff -urNp linux-2.6.32.42/drivers/media/video/omap24xxcam.h linux-2.6.32.42/drivers/media/video/omap24xxcam.h
30971 --- linux-2.6.32.42/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
30972 +++ linux-2.6.32.42/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
30973 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
30974 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
30975 struct videobuf_queue vbq;
30976 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
30977 - atomic_t field_count; /* field counter for videobuf_buffer */
30978 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
30979 /* accessing cam here doesn't need serialisation: it's constant */
30980 struct omap24xxcam_device *cam;
30981 };
30982 diff -urNp linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
30983 --- linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
30984 +++ linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
30985 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
30986 u8 *eeprom;
30987 struct tveeprom tvdata;
30988
30989 + pax_track_stack();
30990 +
30991 memset(&tvdata,0,sizeof(tvdata));
30992
30993 eeprom = pvr2_eeprom_fetch(hdw);
30994 diff -urNp linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c
30995 --- linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
30996 +++ linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
30997 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
30998 unsigned char localPAT[256];
30999 unsigned char localPMT[256];
31000
31001 + pax_track_stack();
31002 +
31003 /* Set video format - must be done first as it resets other settings */
31004 set_reg8(client, 0x41, h->video_format);
31005
31006 diff -urNp linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c
31007 --- linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
31008 +++ linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
31009 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
31010 wait_queue_head_t *q = 0;
31011 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31012
31013 + pax_track_stack();
31014 +
31015 /* While any outstand message on the bus exists... */
31016 do {
31017
31018 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
31019 u8 tmp[512];
31020 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31021
31022 + pax_track_stack();
31023 +
31024 while (loop) {
31025
31026 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
31027 diff -urNp linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c
31028 --- linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
31029 +++ linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c 2011-04-17 15:56:46.000000000 -0400
31030 @@ -225,7 +225,7 @@ static void konicawc_register_input(stru
31031 int error;
31032
31033 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
31034 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31035 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31036
31037 cam->input = input_dev = input_allocate_device();
31038 if (!input_dev) {
31039 diff -urNp linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c
31040 --- linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
31041 +++ linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
31042 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
31043 int error;
31044
31045 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
31046 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31047 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31048
31049 cam->input = input_dev = input_allocate_device();
31050 if (!input_dev) {
31051 diff -urNp linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c
31052 --- linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
31053 +++ linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
31054 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
31055 unsigned char rv, gv, bv;
31056 static unsigned char *Y, *U, *V;
31057
31058 + pax_track_stack();
31059 +
31060 frame = usbvision->curFrame;
31061 imageSize = frame->frmwidth * frame->frmheight;
31062 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
31063 diff -urNp linux-2.6.32.42/drivers/media/video/v4l2-device.c linux-2.6.32.42/drivers/media/video/v4l2-device.c
31064 --- linux-2.6.32.42/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
31065 +++ linux-2.6.32.42/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
31066 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
31067 EXPORT_SYMBOL_GPL(v4l2_device_register);
31068
31069 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
31070 - atomic_t *instance)
31071 + atomic_unchecked_t *instance)
31072 {
31073 - int num = atomic_inc_return(instance) - 1;
31074 + int num = atomic_inc_return_unchecked(instance) - 1;
31075 int len = strlen(basename);
31076
31077 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
31078 diff -urNp linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c
31079 --- linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
31080 +++ linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
31081 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
31082 {
31083 struct videobuf_queue q;
31084
31085 + pax_track_stack();
31086 +
31087 /* Required to make generic handler to call __videobuf_alloc */
31088 q.int_ops = &sg_ops;
31089
31090 diff -urNp linux-2.6.32.42/drivers/message/fusion/mptbase.c linux-2.6.32.42/drivers/message/fusion/mptbase.c
31091 --- linux-2.6.32.42/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
31092 +++ linux-2.6.32.42/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
31093 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
31094 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31095 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31096
31097 +#ifdef CONFIG_GRKERNSEC_HIDESYM
31098 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31099 + NULL, NULL);
31100 +#else
31101 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31102 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31103 +#endif
31104 +
31105 /*
31106 * Rounding UP to nearest 4-kB boundary here...
31107 */
31108 diff -urNp linux-2.6.32.42/drivers/message/fusion/mptsas.c linux-2.6.32.42/drivers/message/fusion/mptsas.c
31109 --- linux-2.6.32.42/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
31110 +++ linux-2.6.32.42/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
31111 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
31112 return 0;
31113 }
31114
31115 +static inline void
31116 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31117 +{
31118 + if (phy_info->port_details) {
31119 + phy_info->port_details->rphy = rphy;
31120 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31121 + ioc->name, rphy));
31122 + }
31123 +
31124 + if (rphy) {
31125 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31126 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31127 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31128 + ioc->name, rphy, rphy->dev.release));
31129 + }
31130 +}
31131 +
31132 /* no mutex */
31133 static void
31134 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31135 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
31136 return NULL;
31137 }
31138
31139 -static inline void
31140 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31141 -{
31142 - if (phy_info->port_details) {
31143 - phy_info->port_details->rphy = rphy;
31144 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31145 - ioc->name, rphy));
31146 - }
31147 -
31148 - if (rphy) {
31149 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31150 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31151 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31152 - ioc->name, rphy, rphy->dev.release));
31153 - }
31154 -}
31155 -
31156 static inline struct sas_port *
31157 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31158 {
31159 diff -urNp linux-2.6.32.42/drivers/message/fusion/mptscsih.c linux-2.6.32.42/drivers/message/fusion/mptscsih.c
31160 --- linux-2.6.32.42/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
31161 +++ linux-2.6.32.42/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
31162 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31163
31164 h = shost_priv(SChost);
31165
31166 - if (h) {
31167 - if (h->info_kbuf == NULL)
31168 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31169 - return h->info_kbuf;
31170 - h->info_kbuf[0] = '\0';
31171 + if (!h)
31172 + return NULL;
31173
31174 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31175 - h->info_kbuf[size-1] = '\0';
31176 - }
31177 + if (h->info_kbuf == NULL)
31178 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31179 + return h->info_kbuf;
31180 + h->info_kbuf[0] = '\0';
31181 +
31182 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31183 + h->info_kbuf[size-1] = '\0';
31184
31185 return h->info_kbuf;
31186 }
31187 diff -urNp linux-2.6.32.42/drivers/message/i2o/i2o_config.c linux-2.6.32.42/drivers/message/i2o/i2o_config.c
31188 --- linux-2.6.32.42/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
31189 +++ linux-2.6.32.42/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
31190 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
31191 struct i2o_message *msg;
31192 unsigned int iop;
31193
31194 + pax_track_stack();
31195 +
31196 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
31197 return -EFAULT;
31198
31199 diff -urNp linux-2.6.32.42/drivers/message/i2o/i2o_proc.c linux-2.6.32.42/drivers/message/i2o/i2o_proc.c
31200 --- linux-2.6.32.42/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
31201 +++ linux-2.6.32.42/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
31202 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
31203 "Array Controller Device"
31204 };
31205
31206 -static char *chtostr(u8 * chars, int n)
31207 -{
31208 - char tmp[256];
31209 - tmp[0] = 0;
31210 - return strncat(tmp, (char *)chars, n);
31211 -}
31212 -
31213 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31214 char *group)
31215 {
31216 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
31217
31218 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31219 seq_printf(seq, "%-#8x", ddm_table.module_id);
31220 - seq_printf(seq, "%-29s",
31221 - chtostr(ddm_table.module_name_version, 28));
31222 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31223 seq_printf(seq, "%9d ", ddm_table.data_size);
31224 seq_printf(seq, "%8d", ddm_table.code_size);
31225
31226 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
31227
31228 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31229 seq_printf(seq, "%-#8x", dst->module_id);
31230 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31231 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31232 + seq_printf(seq, "%-.28s", dst->module_name_version);
31233 + seq_printf(seq, "%-.8s", dst->date);
31234 seq_printf(seq, "%8d ", dst->module_size);
31235 seq_printf(seq, "%8d ", dst->mpb_size);
31236 seq_printf(seq, "0x%04x", dst->module_flags);
31237 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
31238 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31239 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31240 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31241 - seq_printf(seq, "Vendor info : %s\n",
31242 - chtostr((u8 *) (work32 + 2), 16));
31243 - seq_printf(seq, "Product info : %s\n",
31244 - chtostr((u8 *) (work32 + 6), 16));
31245 - seq_printf(seq, "Description : %s\n",
31246 - chtostr((u8 *) (work32 + 10), 16));
31247 - seq_printf(seq, "Product rev. : %s\n",
31248 - chtostr((u8 *) (work32 + 14), 8));
31249 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31250 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31251 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31252 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31253
31254 seq_printf(seq, "Serial number : ");
31255 print_serial_number(seq, (u8 *) (work32 + 16),
31256 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
31257 }
31258
31259 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31260 - seq_printf(seq, "Module name : %s\n",
31261 - chtostr(result.module_name, 24));
31262 - seq_printf(seq, "Module revision : %s\n",
31263 - chtostr(result.module_rev, 8));
31264 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
31265 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31266
31267 seq_printf(seq, "Serial number : ");
31268 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31269 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
31270 return 0;
31271 }
31272
31273 - seq_printf(seq, "Device name : %s\n",
31274 - chtostr(result.device_name, 64));
31275 - seq_printf(seq, "Service name : %s\n",
31276 - chtostr(result.service_name, 64));
31277 - seq_printf(seq, "Physical name : %s\n",
31278 - chtostr(result.physical_location, 64));
31279 - seq_printf(seq, "Instance number : %s\n",
31280 - chtostr(result.instance_number, 4));
31281 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
31282 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
31283 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31284 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31285
31286 return 0;
31287 }
31288 diff -urNp linux-2.6.32.42/drivers/message/i2o/iop.c linux-2.6.32.42/drivers/message/i2o/iop.c
31289 --- linux-2.6.32.42/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
31290 +++ linux-2.6.32.42/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
31291 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
31292
31293 spin_lock_irqsave(&c->context_list_lock, flags);
31294
31295 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31296 - atomic_inc(&c->context_list_counter);
31297 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31298 + atomic_inc_unchecked(&c->context_list_counter);
31299
31300 - entry->context = atomic_read(&c->context_list_counter);
31301 + entry->context = atomic_read_unchecked(&c->context_list_counter);
31302
31303 list_add(&entry->list, &c->context_list);
31304
31305 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
31306
31307 #if BITS_PER_LONG == 64
31308 spin_lock_init(&c->context_list_lock);
31309 - atomic_set(&c->context_list_counter, 0);
31310 + atomic_set_unchecked(&c->context_list_counter, 0);
31311 INIT_LIST_HEAD(&c->context_list);
31312 #endif
31313
31314 diff -urNp linux-2.6.32.42/drivers/mfd/wm8350-i2c.c linux-2.6.32.42/drivers/mfd/wm8350-i2c.c
31315 --- linux-2.6.32.42/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
31316 +++ linux-2.6.32.42/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
31317 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
31318 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
31319 int ret;
31320
31321 + pax_track_stack();
31322 +
31323 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
31324 return -EINVAL;
31325
31326 diff -urNp linux-2.6.32.42/drivers/misc/kgdbts.c linux-2.6.32.42/drivers/misc/kgdbts.c
31327 --- linux-2.6.32.42/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
31328 +++ linux-2.6.32.42/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
31329 @@ -118,7 +118,7 @@
31330 } while (0)
31331 #define MAX_CONFIG_LEN 40
31332
31333 -static struct kgdb_io kgdbts_io_ops;
31334 +static const struct kgdb_io kgdbts_io_ops;
31335 static char get_buf[BUFMAX];
31336 static int get_buf_cnt;
31337 static char put_buf[BUFMAX];
31338 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
31339 module_put(THIS_MODULE);
31340 }
31341
31342 -static struct kgdb_io kgdbts_io_ops = {
31343 +static const struct kgdb_io kgdbts_io_ops = {
31344 .name = "kgdbts",
31345 .read_char = kgdbts_get_char,
31346 .write_char = kgdbts_put_char,
31347 diff -urNp linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c
31348 --- linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
31349 +++ linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
31350 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
31351
31352 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
31353 {
31354 - atomic_long_inc(&mcs_op_statistics[op].count);
31355 - atomic_long_add(clks, &mcs_op_statistics[op].total);
31356 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
31357 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
31358 if (mcs_op_statistics[op].max < clks)
31359 mcs_op_statistics[op].max = clks;
31360 }
31361 diff -urNp linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c
31362 --- linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
31363 +++ linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
31364 @@ -32,9 +32,9 @@
31365
31366 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
31367
31368 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
31369 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
31370 {
31371 - unsigned long val = atomic_long_read(v);
31372 + unsigned long val = atomic_long_read_unchecked(v);
31373
31374 if (val)
31375 seq_printf(s, "%16lu %s\n", val, id);
31376 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
31377 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
31378
31379 for (op = 0; op < mcsop_last; op++) {
31380 - count = atomic_long_read(&mcs_op_statistics[op].count);
31381 - total = atomic_long_read(&mcs_op_statistics[op].total);
31382 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
31383 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
31384 max = mcs_op_statistics[op].max;
31385 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
31386 count ? total / count : 0, max);
31387 diff -urNp linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h
31388 --- linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
31389 +++ linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
31390 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
31391 * GRU statistics.
31392 */
31393 struct gru_stats_s {
31394 - atomic_long_t vdata_alloc;
31395 - atomic_long_t vdata_free;
31396 - atomic_long_t gts_alloc;
31397 - atomic_long_t gts_free;
31398 - atomic_long_t vdata_double_alloc;
31399 - atomic_long_t gts_double_allocate;
31400 - atomic_long_t assign_context;
31401 - atomic_long_t assign_context_failed;
31402 - atomic_long_t free_context;
31403 - atomic_long_t load_user_context;
31404 - atomic_long_t load_kernel_context;
31405 - atomic_long_t lock_kernel_context;
31406 - atomic_long_t unlock_kernel_context;
31407 - atomic_long_t steal_user_context;
31408 - atomic_long_t steal_kernel_context;
31409 - atomic_long_t steal_context_failed;
31410 - atomic_long_t nopfn;
31411 - atomic_long_t break_cow;
31412 - atomic_long_t asid_new;
31413 - atomic_long_t asid_next;
31414 - atomic_long_t asid_wrap;
31415 - atomic_long_t asid_reuse;
31416 - atomic_long_t intr;
31417 - atomic_long_t intr_mm_lock_failed;
31418 - atomic_long_t call_os;
31419 - atomic_long_t call_os_offnode_reference;
31420 - atomic_long_t call_os_check_for_bug;
31421 - atomic_long_t call_os_wait_queue;
31422 - atomic_long_t user_flush_tlb;
31423 - atomic_long_t user_unload_context;
31424 - atomic_long_t user_exception;
31425 - atomic_long_t set_context_option;
31426 - atomic_long_t migrate_check;
31427 - atomic_long_t migrated_retarget;
31428 - atomic_long_t migrated_unload;
31429 - atomic_long_t migrated_unload_delay;
31430 - atomic_long_t migrated_nopfn_retarget;
31431 - atomic_long_t migrated_nopfn_unload;
31432 - atomic_long_t tlb_dropin;
31433 - atomic_long_t tlb_dropin_fail_no_asid;
31434 - atomic_long_t tlb_dropin_fail_upm;
31435 - atomic_long_t tlb_dropin_fail_invalid;
31436 - atomic_long_t tlb_dropin_fail_range_active;
31437 - atomic_long_t tlb_dropin_fail_idle;
31438 - atomic_long_t tlb_dropin_fail_fmm;
31439 - atomic_long_t tlb_dropin_fail_no_exception;
31440 - atomic_long_t tlb_dropin_fail_no_exception_war;
31441 - atomic_long_t tfh_stale_on_fault;
31442 - atomic_long_t mmu_invalidate_range;
31443 - atomic_long_t mmu_invalidate_page;
31444 - atomic_long_t mmu_clear_flush_young;
31445 - atomic_long_t flush_tlb;
31446 - atomic_long_t flush_tlb_gru;
31447 - atomic_long_t flush_tlb_gru_tgh;
31448 - atomic_long_t flush_tlb_gru_zero_asid;
31449 -
31450 - atomic_long_t copy_gpa;
31451 -
31452 - atomic_long_t mesq_receive;
31453 - atomic_long_t mesq_receive_none;
31454 - atomic_long_t mesq_send;
31455 - atomic_long_t mesq_send_failed;
31456 - atomic_long_t mesq_noop;
31457 - atomic_long_t mesq_send_unexpected_error;
31458 - atomic_long_t mesq_send_lb_overflow;
31459 - atomic_long_t mesq_send_qlimit_reached;
31460 - atomic_long_t mesq_send_amo_nacked;
31461 - atomic_long_t mesq_send_put_nacked;
31462 - atomic_long_t mesq_qf_not_full;
31463 - atomic_long_t mesq_qf_locked;
31464 - atomic_long_t mesq_qf_noop_not_full;
31465 - atomic_long_t mesq_qf_switch_head_failed;
31466 - atomic_long_t mesq_qf_unexpected_error;
31467 - atomic_long_t mesq_noop_unexpected_error;
31468 - atomic_long_t mesq_noop_lb_overflow;
31469 - atomic_long_t mesq_noop_qlimit_reached;
31470 - atomic_long_t mesq_noop_amo_nacked;
31471 - atomic_long_t mesq_noop_put_nacked;
31472 + atomic_long_unchecked_t vdata_alloc;
31473 + atomic_long_unchecked_t vdata_free;
31474 + atomic_long_unchecked_t gts_alloc;
31475 + atomic_long_unchecked_t gts_free;
31476 + atomic_long_unchecked_t vdata_double_alloc;
31477 + atomic_long_unchecked_t gts_double_allocate;
31478 + atomic_long_unchecked_t assign_context;
31479 + atomic_long_unchecked_t assign_context_failed;
31480 + atomic_long_unchecked_t free_context;
31481 + atomic_long_unchecked_t load_user_context;
31482 + atomic_long_unchecked_t load_kernel_context;
31483 + atomic_long_unchecked_t lock_kernel_context;
31484 + atomic_long_unchecked_t unlock_kernel_context;
31485 + atomic_long_unchecked_t steal_user_context;
31486 + atomic_long_unchecked_t steal_kernel_context;
31487 + atomic_long_unchecked_t steal_context_failed;
31488 + atomic_long_unchecked_t nopfn;
31489 + atomic_long_unchecked_t break_cow;
31490 + atomic_long_unchecked_t asid_new;
31491 + atomic_long_unchecked_t asid_next;
31492 + atomic_long_unchecked_t asid_wrap;
31493 + atomic_long_unchecked_t asid_reuse;
31494 + atomic_long_unchecked_t intr;
31495 + atomic_long_unchecked_t intr_mm_lock_failed;
31496 + atomic_long_unchecked_t call_os;
31497 + atomic_long_unchecked_t call_os_offnode_reference;
31498 + atomic_long_unchecked_t call_os_check_for_bug;
31499 + atomic_long_unchecked_t call_os_wait_queue;
31500 + atomic_long_unchecked_t user_flush_tlb;
31501 + atomic_long_unchecked_t user_unload_context;
31502 + atomic_long_unchecked_t user_exception;
31503 + atomic_long_unchecked_t set_context_option;
31504 + atomic_long_unchecked_t migrate_check;
31505 + atomic_long_unchecked_t migrated_retarget;
31506 + atomic_long_unchecked_t migrated_unload;
31507 + atomic_long_unchecked_t migrated_unload_delay;
31508 + atomic_long_unchecked_t migrated_nopfn_retarget;
31509 + atomic_long_unchecked_t migrated_nopfn_unload;
31510 + atomic_long_unchecked_t tlb_dropin;
31511 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
31512 + atomic_long_unchecked_t tlb_dropin_fail_upm;
31513 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
31514 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
31515 + atomic_long_unchecked_t tlb_dropin_fail_idle;
31516 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
31517 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
31518 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
31519 + atomic_long_unchecked_t tfh_stale_on_fault;
31520 + atomic_long_unchecked_t mmu_invalidate_range;
31521 + atomic_long_unchecked_t mmu_invalidate_page;
31522 + atomic_long_unchecked_t mmu_clear_flush_young;
31523 + atomic_long_unchecked_t flush_tlb;
31524 + atomic_long_unchecked_t flush_tlb_gru;
31525 + atomic_long_unchecked_t flush_tlb_gru_tgh;
31526 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
31527 +
31528 + atomic_long_unchecked_t copy_gpa;
31529 +
31530 + atomic_long_unchecked_t mesq_receive;
31531 + atomic_long_unchecked_t mesq_receive_none;
31532 + atomic_long_unchecked_t mesq_send;
31533 + atomic_long_unchecked_t mesq_send_failed;
31534 + atomic_long_unchecked_t mesq_noop;
31535 + atomic_long_unchecked_t mesq_send_unexpected_error;
31536 + atomic_long_unchecked_t mesq_send_lb_overflow;
31537 + atomic_long_unchecked_t mesq_send_qlimit_reached;
31538 + atomic_long_unchecked_t mesq_send_amo_nacked;
31539 + atomic_long_unchecked_t mesq_send_put_nacked;
31540 + atomic_long_unchecked_t mesq_qf_not_full;
31541 + atomic_long_unchecked_t mesq_qf_locked;
31542 + atomic_long_unchecked_t mesq_qf_noop_not_full;
31543 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
31544 + atomic_long_unchecked_t mesq_qf_unexpected_error;
31545 + atomic_long_unchecked_t mesq_noop_unexpected_error;
31546 + atomic_long_unchecked_t mesq_noop_lb_overflow;
31547 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
31548 + atomic_long_unchecked_t mesq_noop_amo_nacked;
31549 + atomic_long_unchecked_t mesq_noop_put_nacked;
31550
31551 };
31552
31553 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
31554 cchop_deallocate, tghop_invalidate, mcsop_last};
31555
31556 struct mcs_op_statistic {
31557 - atomic_long_t count;
31558 - atomic_long_t total;
31559 + atomic_long_unchecked_t count;
31560 + atomic_long_unchecked_t total;
31561 unsigned long max;
31562 };
31563
31564 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
31565
31566 #define STAT(id) do { \
31567 if (gru_options & OPT_STATS) \
31568 - atomic_long_inc(&gru_stats.id); \
31569 + atomic_long_inc_unchecked(&gru_stats.id); \
31570 } while (0)
31571
31572 #ifdef CONFIG_SGI_GRU_DEBUG
31573 diff -urNp linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c
31574 --- linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
31575 +++ linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
31576 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
31577 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
31578 unsigned long timeo = jiffies + HZ;
31579
31580 + pax_track_stack();
31581 +
31582 /* Prevent setting state FL_SYNCING for chip in suspended state. */
31583 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
31584 goto sleep;
31585 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
31586 unsigned long initial_adr;
31587 int initial_len = len;
31588
31589 + pax_track_stack();
31590 +
31591 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
31592 adr += chip->start;
31593 initial_adr = adr;
31594 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
31595 int retries = 3;
31596 int ret;
31597
31598 + pax_track_stack();
31599 +
31600 adr += chip->start;
31601
31602 retry:
31603 diff -urNp linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c
31604 --- linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
31605 +++ linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
31606 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
31607 unsigned long cmd_addr;
31608 struct cfi_private *cfi = map->fldrv_priv;
31609
31610 + pax_track_stack();
31611 +
31612 adr += chip->start;
31613
31614 /* Ensure cmd read/writes are aligned. */
31615 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
31616 DECLARE_WAITQUEUE(wait, current);
31617 int wbufsize, z;
31618
31619 + pax_track_stack();
31620 +
31621 /* M58LW064A requires bus alignment for buffer wriets -- saw */
31622 if (adr & (map_bankwidth(map)-1))
31623 return -EINVAL;
31624 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
31625 DECLARE_WAITQUEUE(wait, current);
31626 int ret = 0;
31627
31628 + pax_track_stack();
31629 +
31630 adr += chip->start;
31631
31632 /* Let's determine this according to the interleave only once */
31633 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
31634 unsigned long timeo = jiffies + HZ;
31635 DECLARE_WAITQUEUE(wait, current);
31636
31637 + pax_track_stack();
31638 +
31639 adr += chip->start;
31640
31641 /* Let's determine this according to the interleave only once */
31642 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
31643 unsigned long timeo = jiffies + HZ;
31644 DECLARE_WAITQUEUE(wait, current);
31645
31646 + pax_track_stack();
31647 +
31648 adr += chip->start;
31649
31650 /* Let's determine this according to the interleave only once */
31651 diff -urNp linux-2.6.32.42/drivers/mtd/devices/doc2000.c linux-2.6.32.42/drivers/mtd/devices/doc2000.c
31652 --- linux-2.6.32.42/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
31653 +++ linux-2.6.32.42/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
31654 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
31655
31656 /* The ECC will not be calculated correctly if less than 512 is written */
31657 /* DBB-
31658 - if (len != 0x200 && eccbuf)
31659 + if (len != 0x200)
31660 printk(KERN_WARNING
31661 "ECC needs a full sector write (adr: %lx size %lx)\n",
31662 (long) to, (long) len);
31663 diff -urNp linux-2.6.32.42/drivers/mtd/devices/doc2001.c linux-2.6.32.42/drivers/mtd/devices/doc2001.c
31664 --- linux-2.6.32.42/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
31665 +++ linux-2.6.32.42/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
31666 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
31667 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
31668
31669 /* Don't allow read past end of device */
31670 - if (from >= this->totlen)
31671 + if (from >= this->totlen || !len)
31672 return -EINVAL;
31673
31674 /* Don't allow a single read to cross a 512-byte block boundary */
31675 diff -urNp linux-2.6.32.42/drivers/mtd/ftl.c linux-2.6.32.42/drivers/mtd/ftl.c
31676 --- linux-2.6.32.42/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
31677 +++ linux-2.6.32.42/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
31678 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
31679 loff_t offset;
31680 uint16_t srcunitswap = cpu_to_le16(srcunit);
31681
31682 + pax_track_stack();
31683 +
31684 eun = &part->EUNInfo[srcunit];
31685 xfer = &part->XferInfo[xferunit];
31686 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
31687 diff -urNp linux-2.6.32.42/drivers/mtd/inftlcore.c linux-2.6.32.42/drivers/mtd/inftlcore.c
31688 --- linux-2.6.32.42/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
31689 +++ linux-2.6.32.42/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
31690 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
31691 struct inftl_oob oob;
31692 size_t retlen;
31693
31694 + pax_track_stack();
31695 +
31696 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
31697 "pending=%d)\n", inftl, thisVUC, pendingblock);
31698
31699 diff -urNp linux-2.6.32.42/drivers/mtd/inftlmount.c linux-2.6.32.42/drivers/mtd/inftlmount.c
31700 --- linux-2.6.32.42/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
31701 +++ linux-2.6.32.42/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
31702 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
31703 struct INFTLPartition *ip;
31704 size_t retlen;
31705
31706 + pax_track_stack();
31707 +
31708 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
31709
31710 /*
31711 diff -urNp linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c
31712 --- linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
31713 +++ linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
31714 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
31715 {
31716 map_word pfow_val[4];
31717
31718 + pax_track_stack();
31719 +
31720 /* Check identification string */
31721 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
31722 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
31723 diff -urNp linux-2.6.32.42/drivers/mtd/mtdchar.c linux-2.6.32.42/drivers/mtd/mtdchar.c
31724 --- linux-2.6.32.42/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
31725 +++ linux-2.6.32.42/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
31726 @@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
31727 u_long size;
31728 struct mtd_info_user info;
31729
31730 + pax_track_stack();
31731 +
31732 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
31733
31734 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
31735 diff -urNp linux-2.6.32.42/drivers/mtd/nftlcore.c linux-2.6.32.42/drivers/mtd/nftlcore.c
31736 --- linux-2.6.32.42/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
31737 +++ linux-2.6.32.42/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
31738 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
31739 int inplace = 1;
31740 size_t retlen;
31741
31742 + pax_track_stack();
31743 +
31744 memset(BlockMap, 0xff, sizeof(BlockMap));
31745 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
31746
31747 diff -urNp linux-2.6.32.42/drivers/mtd/nftlmount.c linux-2.6.32.42/drivers/mtd/nftlmount.c
31748 --- linux-2.6.32.42/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
31749 +++ linux-2.6.32.42/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
31750 @@ -23,6 +23,7 @@
31751 #include <asm/errno.h>
31752 #include <linux/delay.h>
31753 #include <linux/slab.h>
31754 +#include <linux/sched.h>
31755 #include <linux/mtd/mtd.h>
31756 #include <linux/mtd/nand.h>
31757 #include <linux/mtd/nftl.h>
31758 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
31759 struct mtd_info *mtd = nftl->mbd.mtd;
31760 unsigned int i;
31761
31762 + pax_track_stack();
31763 +
31764 /* Assume logical EraseSize == physical erasesize for starting the scan.
31765 We'll sort it out later if we find a MediaHeader which says otherwise */
31766 /* Actually, we won't. The new DiskOnChip driver has already scanned
31767 diff -urNp linux-2.6.32.42/drivers/mtd/ubi/build.c linux-2.6.32.42/drivers/mtd/ubi/build.c
31768 --- linux-2.6.32.42/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
31769 +++ linux-2.6.32.42/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
31770 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
31771 static int __init bytes_str_to_int(const char *str)
31772 {
31773 char *endp;
31774 - unsigned long result;
31775 + unsigned long result, scale = 1;
31776
31777 result = simple_strtoul(str, &endp, 0);
31778 if (str == endp || result >= INT_MAX) {
31779 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
31780
31781 switch (*endp) {
31782 case 'G':
31783 - result *= 1024;
31784 + scale *= 1024;
31785 case 'M':
31786 - result *= 1024;
31787 + scale *= 1024;
31788 case 'K':
31789 - result *= 1024;
31790 + scale *= 1024;
31791 if (endp[1] == 'i' && endp[2] == 'B')
31792 endp += 2;
31793 case '\0':
31794 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
31795 return -EINVAL;
31796 }
31797
31798 - return result;
31799 + if ((intoverflow_t)result*scale >= INT_MAX) {
31800 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
31801 + str);
31802 + return -EINVAL;
31803 + }
31804 +
31805 + return result*scale;
31806 }
31807
31808 /**
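The ubi/build.c hunk stops multiplying the parsed value in place for each K/M/G suffix and instead accumulates a separate scale factor, then rejects the product if it no longer fits in an int; the check is done in the patch's wider intoverflow_t type. A self-contained sketch of the same idea, with unsigned long long standing in for intoverflow_t and strtoul for simple_strtoul (both substitutions are mine, not from the patch):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <errno.h>

/* sketch of the fixed bytes_str_to_int(): scale separately, check widely */
static int bytes_str_to_int(const char *str)
{
	char *endp;
	unsigned long result, scale = 1;

	result = strtoul(str, &endp, 0);
	if (str == endp || result >= INT_MAX)
		return -EINVAL;

	switch (*endp) {
	case 'G':
		scale *= 1024;	/* fall through */
	case 'M':
		scale *= 1024;	/* fall through */
	case 'K':
		scale *= 1024;
		if (endp[1] == 'i' && endp[2] == 'B')
			endp += 2;
		/* fall through */
	case '\0':
		break;
	default:
		return -EINVAL;
	}

	/* the overflow check the hunk adds, performed in a 64-bit type */
	if ((unsigned long long)result * scale >= INT_MAX)
		return -EINVAL;

	return (int)(result * scale);
}

int main(void)
{
	printf("%d\n", bytes_str_to_int("512KiB"));	/* 524288 */
	printf("%d\n", bytes_str_to_int("8G"));		/* -EINVAL: exceeds INT_MAX */
	return 0;
}
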
31809 diff -urNp linux-2.6.32.42/drivers/net/bnx2.c linux-2.6.32.42/drivers/net/bnx2.c
31810 --- linux-2.6.32.42/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
31811 +++ linux-2.6.32.42/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
31812 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
31813 int rc = 0;
31814 u32 magic, csum;
31815
31816 + pax_track_stack();
31817 +
31818 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
31819 goto test_nvram_done;
31820
31821 diff -urNp linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c
31822 --- linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
31823 +++ linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
31824 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
31825 int i, addr, ret;
31826 struct t3_vpd vpd;
31827
31828 + pax_track_stack();
31829 +
31830 /*
31831 * Card information is normally at VPD_BASE but some early cards had
31832 * it at 0.
31833 diff -urNp linux-2.6.32.42/drivers/net/e1000e/82571.c linux-2.6.32.42/drivers/net/e1000e/82571.c
31834 --- linux-2.6.32.42/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
31835 +++ linux-2.6.32.42/drivers/net/e1000e/82571.c 2011-04-17 15:56:46.000000000 -0400
31836 @@ -212,6 +212,7 @@ static s32 e1000_init_mac_params_82571(s
31837 {
31838 struct e1000_hw *hw = &adapter->hw;
31839 struct e1000_mac_info *mac = &hw->mac;
31840 + /* cannot be const */
31841 struct e1000_mac_operations *func = &mac->ops;
31842 u32 swsm = 0;
31843 u32 swsm2 = 0;
31844 @@ -1656,7 +1657,7 @@ static void e1000_clear_hw_cntrs_82571(s
31845 temp = er32(ICRXDMTC);
31846 }
31847
31848 -static struct e1000_mac_operations e82571_mac_ops = {
31849 +static const struct e1000_mac_operations e82571_mac_ops = {
31850 /* .check_mng_mode: mac type dependent */
31851 /* .check_for_link: media type dependent */
31852 .id_led_init = e1000e_id_led_init,
31853 @@ -1674,7 +1675,7 @@ static struct e1000_mac_operations e8257
31854 .setup_led = e1000e_setup_led_generic,
31855 };
31856
31857 -static struct e1000_phy_operations e82_phy_ops_igp = {
31858 +static const struct e1000_phy_operations e82_phy_ops_igp = {
31859 .acquire_phy = e1000_get_hw_semaphore_82571,
31860 .check_reset_block = e1000e_check_reset_block_generic,
31861 .commit_phy = NULL,
31862 @@ -1691,7 +1692,7 @@ static struct e1000_phy_operations e82_p
31863 .cfg_on_link_up = NULL,
31864 };
31865
31866 -static struct e1000_phy_operations e82_phy_ops_m88 = {
31867 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
31868 .acquire_phy = e1000_get_hw_semaphore_82571,
31869 .check_reset_block = e1000e_check_reset_block_generic,
31870 .commit_phy = e1000e_phy_sw_reset,
31871 @@ -1708,7 +1709,7 @@ static struct e1000_phy_operations e82_p
31872 .cfg_on_link_up = NULL,
31873 };
31874
31875 -static struct e1000_phy_operations e82_phy_ops_bm = {
31876 +static const struct e1000_phy_operations e82_phy_ops_bm = {
31877 .acquire_phy = e1000_get_hw_semaphore_82571,
31878 .check_reset_block = e1000e_check_reset_block_generic,
31879 .commit_phy = e1000e_phy_sw_reset,
31880 @@ -1725,7 +1726,7 @@ static struct e1000_phy_operations e82_p
31881 .cfg_on_link_up = NULL,
31882 };
31883
31884 -static struct e1000_nvm_operations e82571_nvm_ops = {
31885 +static const struct e1000_nvm_operations e82571_nvm_ops = {
31886 .acquire_nvm = e1000_acquire_nvm_82571,
31887 .read_nvm = e1000e_read_nvm_eerd,
31888 .release_nvm = e1000_release_nvm_82571,
31889 diff -urNp linux-2.6.32.42/drivers/net/e1000e/e1000.h linux-2.6.32.42/drivers/net/e1000e/e1000.h
31890 --- linux-2.6.32.42/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
31891 +++ linux-2.6.32.42/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
31892 @@ -375,9 +375,9 @@ struct e1000_info {
31893 u32 pba;
31894 u32 max_hw_frame_size;
31895 s32 (*get_variants)(struct e1000_adapter *);
31896 - struct e1000_mac_operations *mac_ops;
31897 - struct e1000_phy_operations *phy_ops;
31898 - struct e1000_nvm_operations *nvm_ops;
31899 + const struct e1000_mac_operations *mac_ops;
31900 + const struct e1000_phy_operations *phy_ops;
31901 + const struct e1000_nvm_operations *nvm_ops;
31902 };
31903
31904 /* hardware capability, feature, and workaround flags */
31905 diff -urNp linux-2.6.32.42/drivers/net/e1000e/es2lan.c linux-2.6.32.42/drivers/net/e1000e/es2lan.c
31906 --- linux-2.6.32.42/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
31907 +++ linux-2.6.32.42/drivers/net/e1000e/es2lan.c 2011-04-17 15:56:46.000000000 -0400
31908 @@ -207,6 +207,7 @@ static s32 e1000_init_mac_params_80003es
31909 {
31910 struct e1000_hw *hw = &adapter->hw;
31911 struct e1000_mac_info *mac = &hw->mac;
31912 + /* cannot be const */
31913 struct e1000_mac_operations *func = &mac->ops;
31914
31915 /* Set media type */
31916 @@ -1365,7 +1366,7 @@ static void e1000_clear_hw_cntrs_80003es
31917 temp = er32(ICRXDMTC);
31918 }
31919
31920 -static struct e1000_mac_operations es2_mac_ops = {
31921 +static const struct e1000_mac_operations es2_mac_ops = {
31922 .id_led_init = e1000e_id_led_init,
31923 .check_mng_mode = e1000e_check_mng_mode_generic,
31924 /* check_for_link dependent on media type */
31925 @@ -1383,7 +1384,7 @@ static struct e1000_mac_operations es2_m
31926 .setup_led = e1000e_setup_led_generic,
31927 };
31928
31929 -static struct e1000_phy_operations es2_phy_ops = {
31930 +static const struct e1000_phy_operations es2_phy_ops = {
31931 .acquire_phy = e1000_acquire_phy_80003es2lan,
31932 .check_reset_block = e1000e_check_reset_block_generic,
31933 .commit_phy = e1000e_phy_sw_reset,
31934 @@ -1400,7 +1401,7 @@ static struct e1000_phy_operations es2_p
31935 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
31936 };
31937
31938 -static struct e1000_nvm_operations es2_nvm_ops = {
31939 +static const struct e1000_nvm_operations es2_nvm_ops = {
31940 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
31941 .read_nvm = e1000e_read_nvm_eerd,
31942 .release_nvm = e1000_release_nvm_80003es2lan,
31943 diff -urNp linux-2.6.32.42/drivers/net/e1000e/hw.h linux-2.6.32.42/drivers/net/e1000e/hw.h
31944 --- linux-2.6.32.42/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
31945 +++ linux-2.6.32.42/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
31946 @@ -756,34 +756,34 @@ struct e1000_mac_operations {
31947
31948 /* Function pointers for the PHY. */
31949 struct e1000_phy_operations {
31950 - s32 (*acquire_phy)(struct e1000_hw *);
31951 - s32 (*check_polarity)(struct e1000_hw *);
31952 - s32 (*check_reset_block)(struct e1000_hw *);
31953 - s32 (*commit_phy)(struct e1000_hw *);
31954 - s32 (*force_speed_duplex)(struct e1000_hw *);
31955 - s32 (*get_cfg_done)(struct e1000_hw *hw);
31956 - s32 (*get_cable_length)(struct e1000_hw *);
31957 - s32 (*get_phy_info)(struct e1000_hw *);
31958 - s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
31959 - s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
31960 - void (*release_phy)(struct e1000_hw *);
31961 - s32 (*reset_phy)(struct e1000_hw *);
31962 - s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
31963 - s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
31964 - s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
31965 - s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
31966 - s32 (*cfg_on_link_up)(struct e1000_hw *);
31967 + s32 (* acquire_phy)(struct e1000_hw *);
31968 + s32 (* check_polarity)(struct e1000_hw *);
31969 + s32 (* check_reset_block)(struct e1000_hw *);
31970 + s32 (* commit_phy)(struct e1000_hw *);
31971 + s32 (* force_speed_duplex)(struct e1000_hw *);
31972 + s32 (* get_cfg_done)(struct e1000_hw *hw);
31973 + s32 (* get_cable_length)(struct e1000_hw *);
31974 + s32 (* get_phy_info)(struct e1000_hw *);
31975 + s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
31976 + s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
31977 + void (* release_phy)(struct e1000_hw *);
31978 + s32 (* reset_phy)(struct e1000_hw *);
31979 + s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
31980 + s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
31981 + s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
31982 + s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
31983 + s32 (* cfg_on_link_up)(struct e1000_hw *);
31984 };
31985
31986 /* Function pointers for the NVM. */
31987 struct e1000_nvm_operations {
31988 - s32 (*acquire_nvm)(struct e1000_hw *);
31989 - s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
31990 - void (*release_nvm)(struct e1000_hw *);
31991 - s32 (*update_nvm)(struct e1000_hw *);
31992 - s32 (*valid_led_default)(struct e1000_hw *, u16 *);
31993 - s32 (*validate_nvm)(struct e1000_hw *);
31994 - s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
31995 + s32 (* const acquire_nvm)(struct e1000_hw *);
31996 + s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
31997 + void (* const release_nvm)(struct e1000_hw *);
31998 + s32 (* const update_nvm)(struct e1000_hw *);
31999 + s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
32000 + s32 (* const validate_nvm)(struct e1000_hw *);
32001 + s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
32002 };
32003
32004 struct e1000_mac_info {
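The run of e1000e hunks above marks the mac/phy/nvm dispatch tables const and, in hw.h, makes the function-pointer members themselves "* const", so the tables can be placed in read-only data and individual entries cannot be retargeted at run time; the one site that legitimately writes through such a pointer (mac->ops in the init_mac_params functions) is flagged "cannot be const". A small stand-alone sketch of the pattern, using hypothetical names modelled on e82571_nvm_ops:

#include <stdio.h>

/* hypothetical ops table mirroring the e1000_nvm_operations treatment */
struct demo_nvm_ops {
	int (* const read)(unsigned int offset);	/* member pointer is immutable */
	int (* const write)(unsigned int offset, int val);
};

static int demo_read(unsigned int offset)
{
	return (int)offset + 1;
}

static int demo_write(unsigned int offset, int val)
{
	(void)offset;
	return val;
}

/* the whole object is const too, so it can live in .rodata */
static const struct demo_nvm_ops demo_ops = {
	.read	= demo_read,
	.write	= demo_write,
};

int main(void)
{
	/* demo_ops.read = demo_write;  would be rejected at compile time */
	printf("%d\n", demo_ops.read(41));
	return 0;
}
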
32005 diff -urNp linux-2.6.32.42/drivers/net/e1000e/ich8lan.c linux-2.6.32.42/drivers/net/e1000e/ich8lan.c
32006 --- linux-2.6.32.42/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
32007 +++ linux-2.6.32.42/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:32.000000000 -0400
32008 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
32009 }
32010 }
32011
32012 -static struct e1000_mac_operations ich8_mac_ops = {
32013 +static const struct e1000_mac_operations ich8_mac_ops = {
32014 .id_led_init = e1000e_id_led_init,
32015 .check_mng_mode = e1000_check_mng_mode_ich8lan,
32016 .check_for_link = e1000_check_for_copper_link_ich8lan,
32017 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
32018 /* id_led_init dependent on mac type */
32019 };
32020
32021 -static struct e1000_phy_operations ich8_phy_ops = {
32022 +static const struct e1000_phy_operations ich8_phy_ops = {
32023 .acquire_phy = e1000_acquire_swflag_ich8lan,
32024 .check_reset_block = e1000_check_reset_block_ich8lan,
32025 .commit_phy = NULL,
32026 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
32027 .write_phy_reg = e1000e_write_phy_reg_igp,
32028 };
32029
32030 -static struct e1000_nvm_operations ich8_nvm_ops = {
32031 +static const struct e1000_nvm_operations ich8_nvm_ops = {
32032 .acquire_nvm = e1000_acquire_nvm_ich8lan,
32033 .read_nvm = e1000_read_nvm_ich8lan,
32034 .release_nvm = e1000_release_nvm_ich8lan,
32035 diff -urNp linux-2.6.32.42/drivers/net/hamradio/6pack.c linux-2.6.32.42/drivers/net/hamradio/6pack.c
32036 --- linux-2.6.32.42/drivers/net/hamradio/6pack.c 2011-03-27 14:31:47.000000000 -0400
32037 +++ linux-2.6.32.42/drivers/net/hamradio/6pack.c 2011-05-16 21:46:57.000000000 -0400
32038 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
32039 unsigned char buf[512];
32040 int count1;
32041
32042 + pax_track_stack();
32043 +
32044 if (!count)
32045 return;
32046
32047 diff -urNp linux-2.6.32.42/drivers/net/ibmveth.c linux-2.6.32.42/drivers/net/ibmveth.c
32048 --- linux-2.6.32.42/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
32049 +++ linux-2.6.32.42/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
32050 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
32051 NULL,
32052 };
32053
32054 -static struct sysfs_ops veth_pool_ops = {
32055 +static const struct sysfs_ops veth_pool_ops = {
32056 .show = veth_pool_show,
32057 .store = veth_pool_store,
32058 };
32059 diff -urNp linux-2.6.32.42/drivers/net/igb/e1000_82575.c linux-2.6.32.42/drivers/net/igb/e1000_82575.c
32060 --- linux-2.6.32.42/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
32061 +++ linux-2.6.32.42/drivers/net/igb/e1000_82575.c 2011-04-17 15:56:46.000000000 -0400
32062 @@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
32063 wr32(E1000_VT_CTL, vt_ctl);
32064 }
32065
32066 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
32067 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
32068 .reset_hw = igb_reset_hw_82575,
32069 .init_hw = igb_init_hw_82575,
32070 .check_for_link = igb_check_for_link_82575,
32071 @@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
32072 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
32073 };
32074
32075 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
32076 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
32077 .acquire = igb_acquire_phy_82575,
32078 .get_cfg_done = igb_get_cfg_done_82575,
32079 .release = igb_release_phy_82575,
32080 };
32081
32082 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
32083 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
32084 .acquire = igb_acquire_nvm_82575,
32085 .read = igb_read_nvm_eerd,
32086 .release = igb_release_nvm_82575,
32087 diff -urNp linux-2.6.32.42/drivers/net/igb/e1000_hw.h linux-2.6.32.42/drivers/net/igb/e1000_hw.h
32088 --- linux-2.6.32.42/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
32089 +++ linux-2.6.32.42/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
32090 @@ -305,17 +305,17 @@ struct e1000_phy_operations {
32091 };
32092
32093 struct e1000_nvm_operations {
32094 - s32 (*acquire)(struct e1000_hw *);
32095 - s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
32096 - void (*release)(struct e1000_hw *);
32097 - s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32098 + s32 (* const acquire)(struct e1000_hw *);
32099 + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
32100 + void (* const release)(struct e1000_hw *);
32101 + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
32102 };
32103
32104 struct e1000_info {
32105 s32 (*get_invariants)(struct e1000_hw *);
32106 - struct e1000_mac_operations *mac_ops;
32107 - struct e1000_phy_operations *phy_ops;
32108 - struct e1000_nvm_operations *nvm_ops;
32109 + const struct e1000_mac_operations *mac_ops;
32110 + const struct e1000_phy_operations *phy_ops;
32111 + const struct e1000_nvm_operations *nvm_ops;
32112 };
32113
32114 extern const struct e1000_info e1000_82575_info;
32115 diff -urNp linux-2.6.32.42/drivers/net/iseries_veth.c linux-2.6.32.42/drivers/net/iseries_veth.c
32116 --- linux-2.6.32.42/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
32117 +++ linux-2.6.32.42/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
32118 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
32119 NULL
32120 };
32121
32122 -static struct sysfs_ops veth_cnx_sysfs_ops = {
32123 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
32124 .show = veth_cnx_attribute_show
32125 };
32126
32127 @@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
32128 NULL
32129 };
32130
32131 -static struct sysfs_ops veth_port_sysfs_ops = {
32132 +static const struct sysfs_ops veth_port_sysfs_ops = {
32133 .show = veth_port_attribute_show
32134 };
32135
32136 diff -urNp linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c
32137 --- linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
32138 +++ linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
32139 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
32140 u32 rctl;
32141 int i;
32142
32143 + pax_track_stack();
32144 +
32145 /* Check for Promiscuous and All Multicast modes */
32146
32147 rctl = IXGB_READ_REG(hw, RCTL);
32148 diff -urNp linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c
32149 --- linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
32150 +++ linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
32151 @@ -260,6 +260,9 @@ void __devinit
32152 ixgb_check_options(struct ixgb_adapter *adapter)
32153 {
32154 int bd = adapter->bd_number;
32155 +
32156 + pax_track_stack();
32157 +
32158 if (bd >= IXGB_MAX_NIC) {
32159 printk(KERN_NOTICE
32160 "Warning: no configuration for board #%i\n", bd);
32161 diff -urNp linux-2.6.32.42/drivers/net/mlx4/main.c linux-2.6.32.42/drivers/net/mlx4/main.c
32162 --- linux-2.6.32.42/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
32163 +++ linux-2.6.32.42/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
32164 @@ -38,6 +38,7 @@
32165 #include <linux/errno.h>
32166 #include <linux/pci.h>
32167 #include <linux/dma-mapping.h>
32168 +#include <linux/sched.h>
32169
32170 #include <linux/mlx4/device.h>
32171 #include <linux/mlx4/doorbell.h>
32172 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
32173 u64 icm_size;
32174 int err;
32175
32176 + pax_track_stack();
32177 +
32178 err = mlx4_QUERY_FW(dev);
32179 if (err) {
32180 if (err == -EACCES)
32181 diff -urNp linux-2.6.32.42/drivers/net/niu.c linux-2.6.32.42/drivers/net/niu.c
32182 --- linux-2.6.32.42/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
32183 +++ linux-2.6.32.42/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
32184 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
32185 int i, num_irqs, err;
32186 u8 first_ldg;
32187
32188 + pax_track_stack();
32189 +
32190 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
32191 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
32192 ldg_num_map[i] = first_ldg + i;
32193 diff -urNp linux-2.6.32.42/drivers/net/pcnet32.c linux-2.6.32.42/drivers/net/pcnet32.c
32194 --- linux-2.6.32.42/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
32195 +++ linux-2.6.32.42/drivers/net/pcnet32.c 2011-04-17 15:56:46.000000000 -0400
32196 @@ -79,7 +79,7 @@ static int cards_found;
32197 /*
32198 * VLB I/O addresses
32199 */
32200 -static unsigned int pcnet32_portlist[] __initdata =
32201 +static unsigned int pcnet32_portlist[] __devinitdata =
32202 { 0x300, 0x320, 0x340, 0x360, 0 };
32203
32204 static int pcnet32_debug = 0;
32205 diff -urNp linux-2.6.32.42/drivers/net/tg3.h linux-2.6.32.42/drivers/net/tg3.h
32206 --- linux-2.6.32.42/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
32207 +++ linux-2.6.32.42/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
32208 @@ -95,6 +95,7 @@
32209 #define CHIPREV_ID_5750_A0 0x4000
32210 #define CHIPREV_ID_5750_A1 0x4001
32211 #define CHIPREV_ID_5750_A3 0x4003
32212 +#define CHIPREV_ID_5750_C1 0x4201
32213 #define CHIPREV_ID_5750_C2 0x4202
32214 #define CHIPREV_ID_5752_A0_HW 0x5000
32215 #define CHIPREV_ID_5752_A0 0x6000
32216 diff -urNp linux-2.6.32.42/drivers/net/tulip/de2104x.c linux-2.6.32.42/drivers/net/tulip/de2104x.c
32217 --- linux-2.6.32.42/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
32218 +++ linux-2.6.32.42/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
32219 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
32220 struct de_srom_info_leaf *il;
32221 void *bufp;
32222
32223 + pax_track_stack();
32224 +
32225 /* download entire eeprom */
32226 for (i = 0; i < DE_EEPROM_WORDS; i++)
32227 ((__le16 *)ee_data)[i] =
32228 diff -urNp linux-2.6.32.42/drivers/net/tulip/de4x5.c linux-2.6.32.42/drivers/net/tulip/de4x5.c
32229 --- linux-2.6.32.42/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
32230 +++ linux-2.6.32.42/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
32231 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
32232 for (i=0; i<ETH_ALEN; i++) {
32233 tmp.addr[i] = dev->dev_addr[i];
32234 }
32235 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32236 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32237 break;
32238
32239 case DE4X5_SET_HWADDR: /* Set the hardware address */
32240 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
32241 spin_lock_irqsave(&lp->lock, flags);
32242 memcpy(&statbuf, &lp->pktStats, ioc->len);
32243 spin_unlock_irqrestore(&lp->lock, flags);
32244 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
32245 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32246 return -EFAULT;
32247 break;
32248 }
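The de4x5.c hunks refuse the ioctl when the caller-supplied ioc->len exceeds the kernel buffer about to be copied out, instead of passing that length straight to copy_to_user() and leaking adjacent stack memory. A user-space sketch of the same clamp-or-reject check, with memcpy standing in for copy_to_user and all names hypothetical:

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct demo_ioctl {
	unsigned int len;	/* untrusted, caller-controlled length  */
	unsigned char data[64];	/* destination supplied by the caller  */
};

/* reject over-long requests before copying, as the hunk does */
static int demo_get_hwaddr(struct demo_ioctl *ioc)
{
	unsigned char addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	if (ioc->len > sizeof(addr))
		return -EFAULT;		/* same error the driver returns */
	memcpy(ioc->data, addr, ioc->len);
	return 0;
}

int main(void)
{
	struct demo_ioctl ok  = { .len = 6 };
	struct demo_ioctl bad = { .len = 4096 };

	printf("ok=%d bad=%d\n", demo_get_hwaddr(&ok), demo_get_hwaddr(&bad));
	return 0;
}
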
32249 diff -urNp linux-2.6.32.42/drivers/net/usb/hso.c linux-2.6.32.42/drivers/net/usb/hso.c
32250 --- linux-2.6.32.42/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
32251 +++ linux-2.6.32.42/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
32252 @@ -71,7 +71,7 @@
32253 #include <asm/byteorder.h>
32254 #include <linux/serial_core.h>
32255 #include <linux/serial.h>
32256 -
32257 +#include <asm/local.h>
32258
32259 #define DRIVER_VERSION "1.2"
32260 #define MOD_AUTHOR "Option Wireless"
32261 @@ -258,7 +258,7 @@ struct hso_serial {
32262
32263 /* from usb_serial_port */
32264 struct tty_struct *tty;
32265 - int open_count;
32266 + local_t open_count;
32267 spinlock_t serial_lock;
32268
32269 int (*write_data) (struct hso_serial *serial);
32270 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
32271 struct urb *urb;
32272
32273 urb = serial->rx_urb[0];
32274 - if (serial->open_count > 0) {
32275 + if (local_read(&serial->open_count) > 0) {
32276 count = put_rxbuf_data(urb, serial);
32277 if (count == -1)
32278 return;
32279 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
32280 DUMP1(urb->transfer_buffer, urb->actual_length);
32281
32282 /* Anyone listening? */
32283 - if (serial->open_count == 0)
32284 + if (local_read(&serial->open_count) == 0)
32285 return;
32286
32287 if (status == 0) {
32288 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
32289 spin_unlock_irq(&serial->serial_lock);
32290
32291 /* check for port already opened, if not set the termios */
32292 - serial->open_count++;
32293 - if (serial->open_count == 1) {
32294 + if (local_inc_return(&serial->open_count) == 1) {
32295 tty->low_latency = 1;
32296 serial->rx_state = RX_IDLE;
32297 /* Force default termio settings */
32298 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
32299 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32300 if (result) {
32301 hso_stop_serial_device(serial->parent);
32302 - serial->open_count--;
32303 + local_dec(&serial->open_count);
32304 kref_put(&serial->parent->ref, hso_serial_ref_free);
32305 }
32306 } else {
32307 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
32308
32309 /* reset the rts and dtr */
32310 /* do the actual close */
32311 - serial->open_count--;
32312 + local_dec(&serial->open_count);
32313
32314 - if (serial->open_count <= 0) {
32315 - serial->open_count = 0;
32316 + if (local_read(&serial->open_count) <= 0) {
32317 + local_set(&serial->open_count, 0);
32318 spin_lock_irq(&serial->serial_lock);
32319 if (serial->tty == tty) {
32320 serial->tty->driver_data = NULL;
32321 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
32322
32323 /* the actual setup */
32324 spin_lock_irqsave(&serial->serial_lock, flags);
32325 - if (serial->open_count)
32326 + if (local_read(&serial->open_count))
32327 _hso_serial_set_termios(tty, old);
32328 else
32329 tty->termios = old;
32330 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
32331 /* Start all serial ports */
32332 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32333 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32334 - if (dev2ser(serial_table[i])->open_count) {
32335 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
32336 result =
32337 hso_start_serial_device(serial_table[i], GFP_NOIO);
32338 hso_kick_transmit(dev2ser(serial_table[i]));
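The hso.c hunks replace the plain int open_count with a local_t and the local_inc_return()/local_dec()/local_read() helpers from <asm/local.h>, so the first-open and last-close decisions are made on an atomically updated counter instead of a bare increment. A hedged user-space sketch of the same first-open/last-close logic, using C11 atomics in place of local_t (a substitution of mine, not the kernel API):

#include <stdio.h>
#include <stdatomic.h>

static atomic_long open_count;	/* stands in for the driver's local_t */

/* returns 1 when this open is the first one, as local_inc_return() == 1 does */
static int demo_open(void)
{
	if (atomic_fetch_add(&open_count, 1) + 1 == 1) {
		/* first opener: would set up termios, start the device, ... */
		return 1;
	}
	return 0;
}

/* returns 1 when this close was the last one and teardown should run */
static int demo_close(void)
{
	if (atomic_fetch_sub(&open_count, 1) - 1 <= 0) {
		atomic_store(&open_count, 0);	/* clamp at zero, as the driver does */
		return 1;
	}
	return 0;
}

int main(void)
{
	printf("open:  first=%d\n", demo_open());	/* 1 */
	printf("open:  first=%d\n", demo_open());	/* 0 */
	printf("close: last=%d\n",  demo_close());	/* 0 */
	printf("close: last=%d\n",  demo_close());	/* 1 */
	return 0;
}
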
32339 diff -urNp linux-2.6.32.42/drivers/net/vxge/vxge-main.c linux-2.6.32.42/drivers/net/vxge/vxge-main.c
32340 --- linux-2.6.32.42/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
32341 +++ linux-2.6.32.42/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
32342 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32343 struct sk_buff *completed[NR_SKB_COMPLETED];
32344 int more;
32345
32346 + pax_track_stack();
32347 +
32348 do {
32349 more = 0;
32350 skb_ptr = completed;
32351 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
32352 u8 mtable[256] = {0}; /* CPU to vpath mapping */
32353 int index;
32354
32355 + pax_track_stack();
32356 +
32357 /*
32358 * Filling
32359 * - itable with bucket numbers
32360 diff -urNp linux-2.6.32.42/drivers/net/wan/cycx_x25.c linux-2.6.32.42/drivers/net/wan/cycx_x25.c
32361 --- linux-2.6.32.42/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
32362 +++ linux-2.6.32.42/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
32363 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
32364 unsigned char hex[1024],
32365 * phex = hex;
32366
32367 + pax_track_stack();
32368 +
32369 if (len >= (sizeof(hex) / 2))
32370 len = (sizeof(hex) / 2) - 1;
32371
32372 diff -urNp linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c
32373 --- linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
32374 +++ linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
32375 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
32376 int do_autopm = 1;
32377 DECLARE_COMPLETION_ONSTACK(notif_completion);
32378
32379 + pax_track_stack();
32380 +
32381 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
32382 i2400m, ack, ack_size);
32383 BUG_ON(_ack == i2400m->bm_ack_buf);
32384 diff -urNp linux-2.6.32.42/drivers/net/wireless/airo.c linux-2.6.32.42/drivers/net/wireless/airo.c
32385 --- linux-2.6.32.42/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
32386 +++ linux-2.6.32.42/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
32387 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
32388 BSSListElement * loop_net;
32389 BSSListElement * tmp_net;
32390
32391 + pax_track_stack();
32392 +
32393 /* Blow away current list of scan results */
32394 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
32395 list_move_tail (&loop_net->list, &ai->network_free_list);
32396 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
32397 WepKeyRid wkr;
32398 int rc;
32399
32400 + pax_track_stack();
32401 +
32402 memset( &mySsid, 0, sizeof( mySsid ) );
32403 kfree (ai->flash);
32404 ai->flash = NULL;
32405 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
32406 __le32 *vals = stats.vals;
32407 int len;
32408
32409 + pax_track_stack();
32410 +
32411 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32412 return -ENOMEM;
32413 data = (struct proc_data *)file->private_data;
32414 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
32415 /* If doLoseSync is not 1, we won't do a Lose Sync */
32416 int doLoseSync = -1;
32417
32418 + pax_track_stack();
32419 +
32420 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32421 return -ENOMEM;
32422 data = (struct proc_data *)file->private_data;
32423 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
32424 int i;
32425 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
32426
32427 + pax_track_stack();
32428 +
32429 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
32430 if (!qual)
32431 return -ENOMEM;
32432 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
32433 CapabilityRid cap_rid;
32434 __le32 *vals = stats_rid.vals;
32435
32436 + pax_track_stack();
32437 +
32438 /* Get stats out of the card */
32439 clear_bit(JOB_WSTATS, &local->jobs);
32440 if (local->power.event) {
32441 diff -urNp linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c
32442 --- linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
32443 +++ linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
32444 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
32445 unsigned int v;
32446 u64 tsf;
32447
32448 + pax_track_stack();
32449 +
32450 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
32451 len += snprintf(buf+len, sizeof(buf)-len,
32452 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
32453 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
32454 unsigned int len = 0;
32455 unsigned int i;
32456
32457 + pax_track_stack();
32458 +
32459 len += snprintf(buf+len, sizeof(buf)-len,
32460 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
32461
32462 diff -urNp linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c
32463 --- linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
32464 +++ linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
32465 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
32466 char buf[512];
32467 unsigned int len = 0;
32468
32469 + pax_track_stack();
32470 +
32471 len += snprintf(buf + len, sizeof(buf) - len,
32472 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
32473 len += snprintf(buf + len, sizeof(buf) - len,
32474 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
32475 int i;
32476 u8 addr[ETH_ALEN];
32477
32478 + pax_track_stack();
32479 +
32480 len += snprintf(buf + len, sizeof(buf) - len,
32481 "primary: %s (%s chan=%d ht=%d)\n",
32482 wiphy_name(sc->pri_wiphy->hw->wiphy),
32483 diff -urNp linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c
32484 --- linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32485 +++ linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32486 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
32487 struct b43_debugfs_fops {
32488 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
32489 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
32490 - struct file_operations fops;
32491 + const struct file_operations fops;
32492 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
32493 size_t file_struct_offset;
32494 };
32495 diff -urNp linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c
32496 --- linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32497 +++ linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32498 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
32499 struct b43legacy_debugfs_fops {
32500 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
32501 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
32502 - struct file_operations fops;
32503 + const struct file_operations fops;
32504 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
32505 size_t file_struct_offset;
32506 /* Take wl->irq_lock before calling read/write? */
32507 diff -urNp linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c
32508 --- linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
32509 +++ linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
32510 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
32511 int err;
32512 DECLARE_SSID_BUF(ssid);
32513
32514 + pax_track_stack();
32515 +
32516 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
32517
32518 if (ssid_len)
32519 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
32520 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
32521 int err;
32522
32523 + pax_track_stack();
32524 +
32525 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
32526 idx, keylen, len);
32527
32528 diff -urNp linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c
32529 --- linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
32530 +++ linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
32531 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
32532 unsigned long flags;
32533 DECLARE_SSID_BUF(ssid);
32534
32535 + pax_track_stack();
32536 +
32537 LIBIPW_DEBUG_SCAN("'%s' (%pM"
32538 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
32539 print_ssid(ssid, info_element->data, info_element->len),
32540 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c
32541 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
32542 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
32543 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
32544 },
32545 };
32546
32547 -static struct iwl_ops iwl1000_ops = {
32548 +static const struct iwl_ops iwl1000_ops = {
32549 .ucode = &iwl5000_ucode,
32550 .lib = &iwl1000_lib,
32551 .hcmd = &iwl5000_hcmd,
32552 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c
32553 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
32554 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
32555 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
32556 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
32557 };
32558
32559 -static struct iwl_ops iwl3945_ops = {
32560 +static const struct iwl_ops iwl3945_ops = {
32561 .ucode = &iwl3945_ucode,
32562 .lib = &iwl3945_lib,
32563 .hcmd = &iwl3945_hcmd,
32564 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c
32565 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
32566 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
32567 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
32568 },
32569 };
32570
32571 -static struct iwl_ops iwl4965_ops = {
32572 +static const struct iwl_ops iwl4965_ops = {
32573 .ucode = &iwl4965_ucode,
32574 .lib = &iwl4965_lib,
32575 .hcmd = &iwl4965_hcmd,
32576 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c
32577 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
32578 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
32579 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
32580 },
32581 };
32582
32583 -struct iwl_ops iwl5000_ops = {
32584 +const struct iwl_ops iwl5000_ops = {
32585 .ucode = &iwl5000_ucode,
32586 .lib = &iwl5000_lib,
32587 .hcmd = &iwl5000_hcmd,
32588 .utils = &iwl5000_hcmd_utils,
32589 };
32590
32591 -static struct iwl_ops iwl5150_ops = {
32592 +static const struct iwl_ops iwl5150_ops = {
32593 .ucode = &iwl5000_ucode,
32594 .lib = &iwl5150_lib,
32595 .hcmd = &iwl5000_hcmd,
32596 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c
32597 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
32598 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
32599 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
32600 .calc_rssi = iwl5000_calc_rssi,
32601 };
32602
32603 -static struct iwl_ops iwl6000_ops = {
32604 +static const struct iwl_ops iwl6000_ops = {
32605 .ucode = &iwl5000_ucode,
32606 .lib = &iwl6000_lib,
32607 .hcmd = &iwl5000_hcmd,
32608 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
32609 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
32610 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
32611 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
32612 u8 active_index = 0;
32613 s32 tpt = 0;
32614
32615 + pax_track_stack();
32616 +
32617 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
32618
32619 if (!ieee80211_is_data(hdr->frame_control) ||
32620 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
32621 u8 valid_tx_ant = 0;
32622 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
32623
32624 + pax_track_stack();
32625 +
32626 /* Override starting rate (index 0) if needed for debug purposes */
32627 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
32628
32629 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c
32630 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
32631 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
32632 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
32633 int pos = 0;
32634 const size_t bufsz = sizeof(buf);
32635
32636 + pax_track_stack();
32637 +
32638 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
32639 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
32640 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
32641 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
32642 const size_t bufsz = sizeof(buf);
32643 ssize_t ret;
32644
32645 + pax_track_stack();
32646 +
32647 for (i = 0; i < AC_NUM; i++) {
32648 pos += scnprintf(buf + pos, bufsz - pos,
32649 "\tcw_min\tcw_max\taifsn\ttxop\n");
32650 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h
32651 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
32652 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
32653 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
32654 #endif
32655
32656 #else
32657 -#define IWL_DEBUG(__priv, level, fmt, args...)
32658 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
32659 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
32660 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
32661 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
32662 void *p, u32 len)
32663 {}
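The iwl-debug.h hunk turns the compiled-out debug stubs into do {} while (0) rather than expanding to nothing, keeping IWL_DEBUG(...) a real single statement that must be terminated with a semicolon and that binds cleanly as the body of an if/else. A tiny illustration of the two styles, with hypothetical macro names; the unsafe variant is only described in the comment so the example still compiles cleanly:

#include <stdio.h>

/* the two styles of a compiled-out debug stub */
#define DBG_EMPTY(fmt, ...)
#define DBG_SAFE(fmt, ...)	do { } while (0)

int main(void)
{
	int err = 0;

	/*
	 * With DBG_EMPTY the body of this "if" reduces to a bare ';', which
	 * -Wempty-body flags, and a forgotten semicolon would silently pull
	 * the next statement into the conditional.  The do-while form stays
	 * an ordinary single-statement body.
	 */
	if (err)
		DBG_SAFE("error path: %d\n", err);
	else
		printf("debug output compiled out\n");

	return 0;
}
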
32664 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h
32665 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
32666 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
32667 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
32668
32669 /* shared structures from iwl-5000.c */
32670 extern struct iwl_mod_params iwl50_mod_params;
32671 -extern struct iwl_ops iwl5000_ops;
32672 +extern const struct iwl_ops iwl5000_ops;
32673 extern struct iwl_ucode_ops iwl5000_ucode;
32674 extern struct iwl_lib_ops iwl5000_lib;
32675 extern struct iwl_hcmd_ops iwl5000_hcmd;
32676 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c
32677 --- linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32678 +++ linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
32679 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
32680 int buf_len = 512;
32681 size_t len = 0;
32682
32683 + pax_track_stack();
32684 +
32685 if (*ppos != 0)
32686 return 0;
32687 if (count < sizeof(buf))
32688 diff -urNp linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c
32689 --- linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32690 +++ linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32691 @@ -708,7 +708,7 @@ out_unlock:
32692 struct lbs_debugfs_files {
32693 const char *name;
32694 int perm;
32695 - struct file_operations fops;
32696 + const struct file_operations fops;
32697 };
32698
32699 static const struct lbs_debugfs_files debugfs_files[] = {
32700 diff -urNp linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c
32701 --- linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
32702 +++ linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
32703 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
32704
32705 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
32706
32707 - if (rts_threshold < 0 || rts_threshold > 2347)
32708 + if (rts_threshold > 2347)
32709 rts_threshold = 2347;
32710
32711 tmp = cpu_to_le32(rts_threshold);
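The rndis_wlan.c hunk drops the "rts_threshold < 0" arm; the threshold is an unsigned quantity here, so that comparison can never be true and only invites a compiler warning, while the "> 2347" clamp already catches every out-of-range value. A two-branch illustration of why such a test is dead code on unsigned types:

#include <stdio.h>

int main(void)
{
	unsigned int rts_threshold = (unsigned int)-1;	/* e.g. 0xffffffff */

	/* an unsigned value is never negative, so this branch is dead code */
	if (rts_threshold < 0)
		printf("never reached\n");

	/* the remaining clamp already handles every out-of-range value */
	if (rts_threshold > 2347)
		rts_threshold = 2347;

	printf("%u\n", rts_threshold);	/* 2347 */
	return 0;
}
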
32712 diff -urNp linux-2.6.32.42/drivers/oprofile/buffer_sync.c linux-2.6.32.42/drivers/oprofile/buffer_sync.c
32713 --- linux-2.6.32.42/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
32714 +++ linux-2.6.32.42/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
32715 @@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
32716 if (cookie == NO_COOKIE)
32717 offset = pc;
32718 if (cookie == INVALID_COOKIE) {
32719 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32720 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32721 offset = pc;
32722 }
32723 if (cookie != last_cookie) {
32724 @@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
32725 /* add userspace sample */
32726
32727 if (!mm) {
32728 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
32729 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
32730 return 0;
32731 }
32732
32733 cookie = lookup_dcookie(mm, s->eip, &offset);
32734
32735 if (cookie == INVALID_COOKIE) {
32736 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32737 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32738 return 0;
32739 }
32740
32741 @@ -561,7 +561,7 @@ void sync_buffer(int cpu)
32742 /* ignore backtraces if failed to add a sample */
32743 if (state == sb_bt_start) {
32744 state = sb_bt_ignore;
32745 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
32746 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
32747 }
32748 }
32749 release_mm(mm);
32750 diff -urNp linux-2.6.32.42/drivers/oprofile/event_buffer.c linux-2.6.32.42/drivers/oprofile/event_buffer.c
32751 --- linux-2.6.32.42/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
32752 +++ linux-2.6.32.42/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
32753 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
32754 }
32755
32756 if (buffer_pos == buffer_size) {
32757 - atomic_inc(&oprofile_stats.event_lost_overflow);
32758 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
32759 return;
32760 }
32761
32762 diff -urNp linux-2.6.32.42/drivers/oprofile/oprof.c linux-2.6.32.42/drivers/oprofile/oprof.c
32763 --- linux-2.6.32.42/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
32764 +++ linux-2.6.32.42/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
32765 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
32766 if (oprofile_ops.switch_events())
32767 return;
32768
32769 - atomic_inc(&oprofile_stats.multiplex_counter);
32770 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
32771 start_switch_worker();
32772 }
32773
32774 diff -urNp linux-2.6.32.42/drivers/oprofile/oprofilefs.c linux-2.6.32.42/drivers/oprofile/oprofilefs.c
32775 --- linux-2.6.32.42/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
32776 +++ linux-2.6.32.42/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
32777 @@ -187,7 +187,7 @@ static const struct file_operations atom
32778
32779
32780 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
32781 - char const *name, atomic_t *val)
32782 + char const *name, atomic_unchecked_t *val)
32783 {
32784 struct dentry *d = __oprofilefs_create_file(sb, root, name,
32785 &atomic_ro_fops, 0444);
32786 diff -urNp linux-2.6.32.42/drivers/oprofile/oprofile_stats.c linux-2.6.32.42/drivers/oprofile/oprofile_stats.c
32787 --- linux-2.6.32.42/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
32788 +++ linux-2.6.32.42/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
32789 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
32790 cpu_buf->sample_invalid_eip = 0;
32791 }
32792
32793 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
32794 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
32795 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
32796 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
32797 - atomic_set(&oprofile_stats.multiplex_counter, 0);
32798 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
32799 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
32800 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
32801 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
32802 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
32803 }
32804
32805
32806 diff -urNp linux-2.6.32.42/drivers/oprofile/oprofile_stats.h linux-2.6.32.42/drivers/oprofile/oprofile_stats.h
32807 --- linux-2.6.32.42/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
32808 +++ linux-2.6.32.42/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
32809 @@ -13,11 +13,11 @@
32810 #include <asm/atomic.h>
32811
32812 struct oprofile_stat_struct {
32813 - atomic_t sample_lost_no_mm;
32814 - atomic_t sample_lost_no_mapping;
32815 - atomic_t bt_lost_no_mapping;
32816 - atomic_t event_lost_overflow;
32817 - atomic_t multiplex_counter;
32818 + atomic_unchecked_t sample_lost_no_mm;
32819 + atomic_unchecked_t sample_lost_no_mapping;
32820 + atomic_unchecked_t bt_lost_no_mapping;
32821 + atomic_unchecked_t event_lost_overflow;
32822 + atomic_unchecked_t multiplex_counter;
32823 };
32824
32825 extern struct oprofile_stat_struct oprofile_stats;
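The oprofile hunks convert the lost-sample statistics from atomic_t to the patch's atomic_unchecked_t and the matching *_unchecked() helpers. The point of the split is that, under PaX's reference-count overflow protection, ordinary atomic_t increments are instrumented against wraparound; pure event counters such as sample_lost_no_mapping are expected to wrap harmlessly, so they are moved to the unchecked variant rather than taking the instrumentation. A hedged user-space sketch of the distinction, with the checked counter modelled as an increment that refuses to wrap (the real kernel uses per-arch atomics; these helpers are only illustrative):

#include <stdio.h>
#include <limits.h>

/* "checked" stand-in: refuses to wrap, as the hardened atomic_t path would */
static int checked_inc(int *counter)
{
	if (*counter == INT_MAX)
		return -1;		/* would wrap: refuse */
	(*counter)++;
	return 0;
}

/* "unchecked" stand-in: statistics may wrap harmlessly */
static void unchecked_inc(unsigned int *counter)
{
	(*counter)++;
}

int main(void)
{
	int refcount = INT_MAX;			/* e.g. an object reference count */
	unsigned int lost_samples = UINT_MAX;	/* e.g. sample_lost_no_mapping    */

	printf("checked_inc at INT_MAX -> %d (refused)\n", checked_inc(&refcount));
	unchecked_inc(&lost_samples);
	printf("unchecked counter wrapped to %u\n", lost_samples);
	return 0;
}
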
32826 diff -urNp linux-2.6.32.42/drivers/parisc/pdc_stable.c linux-2.6.32.42/drivers/parisc/pdc_stable.c
32827 --- linux-2.6.32.42/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
32828 +++ linux-2.6.32.42/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
32829 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
32830 return ret;
32831 }
32832
32833 -static struct sysfs_ops pdcspath_attr_ops = {
32834 +static const struct sysfs_ops pdcspath_attr_ops = {
32835 .show = pdcspath_attr_show,
32836 .store = pdcspath_attr_store,
32837 };
32838 diff -urNp linux-2.6.32.42/drivers/parport/procfs.c linux-2.6.32.42/drivers/parport/procfs.c
32839 --- linux-2.6.32.42/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
32840 +++ linux-2.6.32.42/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
32841 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
32842
32843 *ppos += len;
32844
32845 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
32846 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
32847 }
32848
32849 #ifdef CONFIG_PARPORT_1284
32850 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
32851
32852 *ppos += len;
32853
32854 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
32855 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
32856 }
32857 #endif /* IEEE1284.3 support. */
32858
32859 diff -urNp linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c
32860 --- linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
32861 +++ linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
32862 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
32863 }
32864
32865
32866 -static struct acpi_dock_ops acpiphp_dock_ops = {
32867 +static const struct acpi_dock_ops acpiphp_dock_ops = {
32868 .handler = handle_hotplug_event_func,
32869 };
32870
32871 diff -urNp linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c
32872 --- linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
32873 +++ linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
32874 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
32875
32876 void compaq_nvram_init (void __iomem *rom_start)
32877 {
32878 +
32879 +#ifndef CONFIG_PAX_KERNEXEC
32880 if (rom_start) {
32881 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
32882 }
32883 +#endif
32884 +
32885 dbg("int15 entry = %p\n", compaq_int15_entry_point);
32886
32887 /* initialize our int15 lock */
32888 diff -urNp linux-2.6.32.42/drivers/pci/hotplug/fakephp.c linux-2.6.32.42/drivers/pci/hotplug/fakephp.c
32889 --- linux-2.6.32.42/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
32890 +++ linux-2.6.32.42/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
32891 @@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
32892 }
32893
32894 static struct kobj_type legacy_ktype = {
32895 - .sysfs_ops = &(struct sysfs_ops){
32896 + .sysfs_ops = &(const struct sysfs_ops){
32897 .store = legacy_store, .show = legacy_show
32898 },
32899 .release = &legacy_release,
32900 diff -urNp linux-2.6.32.42/drivers/pci/intel-iommu.c linux-2.6.32.42/drivers/pci/intel-iommu.c
32901 --- linux-2.6.32.42/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
32902 +++ linux-2.6.32.42/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
32903 @@ -2643,7 +2643,7 @@ error:
32904 return 0;
32905 }
32906
32907 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
32908 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
32909 unsigned long offset, size_t size,
32910 enum dma_data_direction dir,
32911 struct dma_attrs *attrs)
32912 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
32913 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
32914 }
32915
32916 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
32917 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
32918 size_t size, enum dma_data_direction dir,
32919 struct dma_attrs *attrs)
32920 {
32921 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
32922 }
32923 }
32924
32925 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
32926 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
32927 dma_addr_t *dma_handle, gfp_t flags)
32928 {
32929 void *vaddr;
32930 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
32931 return NULL;
32932 }
32933
32934 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
32935 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
32936 dma_addr_t dma_handle)
32937 {
32938 int order;
32939 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
32940 free_pages((unsigned long)vaddr, order);
32941 }
32942
32943 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
32944 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
32945 int nelems, enum dma_data_direction dir,
32946 struct dma_attrs *attrs)
32947 {
32948 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
32949 return nelems;
32950 }
32951
32952 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
32953 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
32954 enum dma_data_direction dir, struct dma_attrs *attrs)
32955 {
32956 int i;
32957 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
32958 return nelems;
32959 }
32960
32961 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
32962 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
32963 {
32964 return !dma_addr;
32965 }
32966
32967 -struct dma_map_ops intel_dma_ops = {
32968 +const struct dma_map_ops intel_dma_ops = {
32969 .alloc_coherent = intel_alloc_coherent,
32970 .free_coherent = intel_free_coherent,
32971 .map_sg = intel_map_sg,
32972 diff -urNp linux-2.6.32.42/drivers/pci/pcie/aspm.c linux-2.6.32.42/drivers/pci/pcie/aspm.c
32973 --- linux-2.6.32.42/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
32974 +++ linux-2.6.32.42/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
32975 @@ -27,9 +27,9 @@
32976 #define MODULE_PARAM_PREFIX "pcie_aspm."
32977
32978 /* Note: those are not register definitions */
32979 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
32980 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
32981 -#define ASPM_STATE_L1 (4) /* L1 state */
32982 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
32983 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
32984 +#define ASPM_STATE_L1 (4U) /* L1 state */
32985 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
32986 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
32987
32988 diff -urNp linux-2.6.32.42/drivers/pci/probe.c linux-2.6.32.42/drivers/pci/probe.c
32989 --- linux-2.6.32.42/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
32990 +++ linux-2.6.32.42/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
32991 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
32992 return ret;
32993 }
32994
32995 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
32996 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
32997 struct device_attribute *attr,
32998 char *buf)
32999 {
33000 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
33001 }
33002
33003 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
33004 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
33005 struct device_attribute *attr,
33006 char *buf)
33007 {
33008 diff -urNp linux-2.6.32.42/drivers/pci/proc.c linux-2.6.32.42/drivers/pci/proc.c
33009 --- linux-2.6.32.42/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
33010 +++ linux-2.6.32.42/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
33011 @@ -480,7 +480,16 @@ static const struct file_operations proc
33012 static int __init pci_proc_init(void)
33013 {
33014 struct pci_dev *dev = NULL;
33015 +
33016 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
33017 +#ifdef CONFIG_GRKERNSEC_PROC_USER
33018 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33019 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33020 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33021 +#endif
33022 +#else
33023 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33024 +#endif
33025 proc_create("devices", 0, proc_bus_pci_dir,
33026 &proc_bus_pci_dev_operations);
33027 proc_initialized = 1;
33028 diff -urNp linux-2.6.32.42/drivers/pci/slot.c linux-2.6.32.42/drivers/pci/slot.c
33029 --- linux-2.6.32.42/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
33030 +++ linux-2.6.32.42/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
33031 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
33032 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
33033 }
33034
33035 -static struct sysfs_ops pci_slot_sysfs_ops = {
33036 +static const struct sysfs_ops pci_slot_sysfs_ops = {
33037 .show = pci_slot_attr_show,
33038 .store = pci_slot_attr_store,
33039 };
33040 diff -urNp linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c
33041 --- linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
33042 +++ linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
33043 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
33044 return -EFAULT;
33045 }
33046 }
33047 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
33048 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
33049 if (!buf)
33050 return -ENOMEM;
33051
33052 diff -urNp linux-2.6.32.42/drivers/platform/x86/acer-wmi.c linux-2.6.32.42/drivers/platform/x86/acer-wmi.c
33053 --- linux-2.6.32.42/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
33054 +++ linux-2.6.32.42/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
33055 @@ -918,7 +918,7 @@ static int update_bl_status(struct backl
33056 return 0;
33057 }
33058
33059 -static struct backlight_ops acer_bl_ops = {
33060 +static const struct backlight_ops acer_bl_ops = {
33061 .get_brightness = read_brightness,
33062 .update_status = update_bl_status,
33063 };
33064 diff -urNp linux-2.6.32.42/drivers/platform/x86/asus_acpi.c linux-2.6.32.42/drivers/platform/x86/asus_acpi.c
33065 --- linux-2.6.32.42/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
33066 +++ linux-2.6.32.42/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
33067 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
33068 return 0;
33069 }
33070
33071 -static struct backlight_ops asus_backlight_data = {
33072 +static const struct backlight_ops asus_backlight_data = {
33073 .get_brightness = read_brightness,
33074 .update_status = set_brightness_status,
33075 };
33076 diff -urNp linux-2.6.32.42/drivers/platform/x86/asus-laptop.c linux-2.6.32.42/drivers/platform/x86/asus-laptop.c
33077 --- linux-2.6.32.42/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
33078 +++ linux-2.6.32.42/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
33079 @@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
33080 */
33081 static int read_brightness(struct backlight_device *bd);
33082 static int update_bl_status(struct backlight_device *bd);
33083 -static struct backlight_ops asusbl_ops = {
33084 +static const struct backlight_ops asusbl_ops = {
33085 .get_brightness = read_brightness,
33086 .update_status = update_bl_status,
33087 };
33088 diff -urNp linux-2.6.32.42/drivers/platform/x86/compal-laptop.c linux-2.6.32.42/drivers/platform/x86/compal-laptop.c
33089 --- linux-2.6.32.42/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
33090 +++ linux-2.6.32.42/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
33091 @@ -163,7 +163,7 @@ static int bl_update_status(struct backl
33092 return set_lcd_level(b->props.brightness);
33093 }
33094
33095 -static struct backlight_ops compalbl_ops = {
33096 +static const struct backlight_ops compalbl_ops = {
33097 .get_brightness = bl_get_brightness,
33098 .update_status = bl_update_status,
33099 };
33100 diff -urNp linux-2.6.32.42/drivers/platform/x86/dell-laptop.c linux-2.6.32.42/drivers/platform/x86/dell-laptop.c
33101 --- linux-2.6.32.42/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
33102 +++ linux-2.6.32.42/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
33103 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
33104 return buffer.output[1];
33105 }
33106
33107 -static struct backlight_ops dell_ops = {
33108 +static const struct backlight_ops dell_ops = {
33109 .get_brightness = dell_get_intensity,
33110 .update_status = dell_send_intensity,
33111 };
33112 diff -urNp linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c
33113 --- linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
33114 +++ linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
33115 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
33116 */
33117 static int read_brightness(struct backlight_device *bd);
33118 static int update_bl_status(struct backlight_device *bd);
33119 -static struct backlight_ops eeepcbl_ops = {
33120 +static const struct backlight_ops eeepcbl_ops = {
33121 .get_brightness = read_brightness,
33122 .update_status = update_bl_status,
33123 };
33124 diff -urNp linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c
33125 --- linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
33126 +++ linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
33127 @@ -436,7 +436,7 @@ static int bl_update_status(struct backl
33128 return ret;
33129 }
33130
33131 -static struct backlight_ops fujitsubl_ops = {
33132 +static const struct backlight_ops fujitsubl_ops = {
33133 .get_brightness = bl_get_brightness,
33134 .update_status = bl_update_status,
33135 };
33136 diff -urNp linux-2.6.32.42/drivers/platform/x86/msi-laptop.c linux-2.6.32.42/drivers/platform/x86/msi-laptop.c
33137 --- linux-2.6.32.42/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
33138 +++ linux-2.6.32.42/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
33139 @@ -161,7 +161,7 @@ static int bl_update_status(struct backl
33140 return set_lcd_level(b->props.brightness);
33141 }
33142
33143 -static struct backlight_ops msibl_ops = {
33144 +static const struct backlight_ops msibl_ops = {
33145 .get_brightness = bl_get_brightness,
33146 .update_status = bl_update_status,
33147 };
33148 diff -urNp linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c
33149 --- linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
33150 +++ linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
33151 @@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
33152 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
33153 }
33154
33155 -static struct backlight_ops pcc_backlight_ops = {
33156 +static const struct backlight_ops pcc_backlight_ops = {
33157 .get_brightness = bl_get,
33158 .update_status = bl_set_status,
33159 };
33160 diff -urNp linux-2.6.32.42/drivers/platform/x86/sony-laptop.c linux-2.6.32.42/drivers/platform/x86/sony-laptop.c
33161 --- linux-2.6.32.42/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
33162 +++ linux-2.6.32.42/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
33163 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
33164 }
33165
33166 static struct backlight_device *sony_backlight_device;
33167 -static struct backlight_ops sony_backlight_ops = {
33168 +static const struct backlight_ops sony_backlight_ops = {
33169 .update_status = sony_backlight_update_status,
33170 .get_brightness = sony_backlight_get_brightness,
33171 };
33172 diff -urNp linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c
33173 --- linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
33174 +++ linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c 2011-04-17 15:56:46.000000000 -0400
33175 @@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
33176 BACKLIGHT_UPDATE_HOTKEY);
33177 }
33178
33179 -static struct backlight_ops ibm_backlight_data = {
33180 +static const struct backlight_ops ibm_backlight_data = {
33181 .get_brightness = brightness_get,
33182 .update_status = brightness_update_status,
33183 };
33184 diff -urNp linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c
33185 --- linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
33186 +++ linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
33187 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
33188 return AE_OK;
33189 }
33190
33191 -static struct backlight_ops toshiba_backlight_data = {
33192 +static const struct backlight_ops toshiba_backlight_data = {
33193 .get_brightness = get_lcd,
33194 .update_status = set_lcd_status,
33195 };
33196 diff -urNp linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c
33197 --- linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
33198 +++ linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
33199 @@ -60,7 +60,7 @@ do { \
33200 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33201 } while(0)
33202
33203 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33204 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33205 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33206
33207 /*
33208 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
33209
33210 cpu = get_cpu();
33211 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33212 +
33213 + pax_open_kernel();
33214 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33215 + pax_close_kernel();
33216
33217 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33218 spin_lock_irqsave(&pnp_bios_lock, flags);
33219 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
33220 :"memory");
33221 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33222
33223 + pax_open_kernel();
33224 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33225 + pax_close_kernel();
33226 +
33227 put_cpu();
33228
33229 /* If we get here and this is set then the PnP BIOS faulted on us. */
33230 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
33231 return status;
33232 }
33233
33234 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
33235 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33236 {
33237 int i;
33238
33239 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
33240 pnp_bios_callpoint.offset = header->fields.pm16offset;
33241 pnp_bios_callpoint.segment = PNP_CS16;
33242
33243 + pax_open_kernel();
33244 +
33245 for_each_possible_cpu(i) {
33246 struct desc_struct *gdt = get_cpu_gdt_table(i);
33247 if (!gdt)
33248 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
33249 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33250 (unsigned long)__va(header->fields.pm16dseg));
33251 }
33252 +
33253 + pax_close_kernel();
33254 }
33255 diff -urNp linux-2.6.32.42/drivers/pnp/resource.c linux-2.6.32.42/drivers/pnp/resource.c
33256 --- linux-2.6.32.42/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
33257 +++ linux-2.6.32.42/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
33258 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
33259 return 1;
33260
33261 /* check if the resource is valid */
33262 - if (*irq < 0 || *irq > 15)
33263 + if (*irq > 15)
33264 return 0;
33265
33266 /* check if the resource is reserved */
33267 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
33268 return 1;
33269
33270 /* check if the resource is valid */
33271 - if (*dma < 0 || *dma == 4 || *dma > 7)
33272 + if (*dma == 4 || *dma > 7)
33273 return 0;
33274
33275 /* check if the resource is reserved */
33276 diff -urNp linux-2.6.32.42/drivers/rtc/rtc-dev.c linux-2.6.32.42/drivers/rtc/rtc-dev.c
33277 --- linux-2.6.32.42/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
33278 +++ linux-2.6.32.42/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
33279 @@ -14,6 +14,7 @@
33280 #include <linux/module.h>
33281 #include <linux/rtc.h>
33282 #include <linux/sched.h>
33283 +#include <linux/grsecurity.h>
33284 #include "rtc-core.h"
33285
33286 static dev_t rtc_devt;
33287 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
33288 if (copy_from_user(&tm, uarg, sizeof(tm)))
33289 return -EFAULT;
33290
33291 + gr_log_timechange();
33292 +
33293 return rtc_set_time(rtc, &tm);
33294
33295 case RTC_PIE_ON:
33296 diff -urNp linux-2.6.32.42/drivers/s390/cio/qdio_perf.c linux-2.6.32.42/drivers/s390/cio/qdio_perf.c
33297 --- linux-2.6.32.42/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
33298 +++ linux-2.6.32.42/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
33299 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
33300 static int qdio_perf_proc_show(struct seq_file *m, void *v)
33301 {
33302 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
33303 - (long)atomic_long_read(&perf_stats.qdio_int));
33304 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
33305 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
33306 - (long)atomic_long_read(&perf_stats.pci_int));
33307 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
33308 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
33309 - (long)atomic_long_read(&perf_stats.thin_int));
33310 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
33311 seq_printf(m, "\n");
33312 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
33313 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
33314 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
33315 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
33316 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
33317 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
33318 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
33319 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
33320 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
33321 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
33322 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
33323 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
33324 - (long)atomic_long_read(&perf_stats.thinint_inbound),
33325 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
33326 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
33327 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
33328 seq_printf(m, "\n");
33329 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
33330 - (long)atomic_long_read(&perf_stats.siga_in));
33331 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
33332 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
33333 - (long)atomic_long_read(&perf_stats.siga_out));
33334 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
33335 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
33336 - (long)atomic_long_read(&perf_stats.siga_sync));
33337 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
33338 seq_printf(m, "\n");
33339 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
33340 - (long)atomic_long_read(&perf_stats.inbound_handler));
33341 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
33342 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
33343 - (long)atomic_long_read(&perf_stats.outbound_handler));
33344 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
33345 seq_printf(m, "\n");
33346 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
33347 - (long)atomic_long_read(&perf_stats.fast_requeue));
33348 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
33349 seq_printf(m, "Number of outbound target full condition\t: %li\n",
33350 - (long)atomic_long_read(&perf_stats.outbound_target_full));
33351 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
33352 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
33353 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
33354 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
33355 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
33356 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
33357 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
33358 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
33359 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
33360 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
33361 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
33362 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
33363 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
33364 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
33365 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
33366 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
33367 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
33368 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
33369 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
33370 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
33371 seq_printf(m, "\n");
33372 return 0;
33373 }
33374 diff -urNp linux-2.6.32.42/drivers/s390/cio/qdio_perf.h linux-2.6.32.42/drivers/s390/cio/qdio_perf.h
33375 --- linux-2.6.32.42/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
33376 +++ linux-2.6.32.42/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
33377 @@ -13,46 +13,46 @@
33378
33379 struct qdio_perf_stats {
33380 /* interrupt handler calls */
33381 - atomic_long_t qdio_int;
33382 - atomic_long_t pci_int;
33383 - atomic_long_t thin_int;
33384 + atomic_long_unchecked_t qdio_int;
33385 + atomic_long_unchecked_t pci_int;
33386 + atomic_long_unchecked_t thin_int;
33387
33388 /* tasklet runs */
33389 - atomic_long_t tasklet_inbound;
33390 - atomic_long_t tasklet_outbound;
33391 - atomic_long_t tasklet_thinint;
33392 - atomic_long_t tasklet_thinint_loop;
33393 - atomic_long_t thinint_inbound;
33394 - atomic_long_t thinint_inbound_loop;
33395 - atomic_long_t thinint_inbound_loop2;
33396 + atomic_long_unchecked_t tasklet_inbound;
33397 + atomic_long_unchecked_t tasklet_outbound;
33398 + atomic_long_unchecked_t tasklet_thinint;
33399 + atomic_long_unchecked_t tasklet_thinint_loop;
33400 + atomic_long_unchecked_t thinint_inbound;
33401 + atomic_long_unchecked_t thinint_inbound_loop;
33402 + atomic_long_unchecked_t thinint_inbound_loop2;
33403
33404 /* signal adapter calls */
33405 - atomic_long_t siga_out;
33406 - atomic_long_t siga_in;
33407 - atomic_long_t siga_sync;
33408 + atomic_long_unchecked_t siga_out;
33409 + atomic_long_unchecked_t siga_in;
33410 + atomic_long_unchecked_t siga_sync;
33411
33412 /* misc */
33413 - atomic_long_t inbound_handler;
33414 - atomic_long_t outbound_handler;
33415 - atomic_long_t fast_requeue;
33416 - atomic_long_t outbound_target_full;
33417 + atomic_long_unchecked_t inbound_handler;
33418 + atomic_long_unchecked_t outbound_handler;
33419 + atomic_long_unchecked_t fast_requeue;
33420 + atomic_long_unchecked_t outbound_target_full;
33421
33422 /* for debugging */
33423 - atomic_long_t debug_tl_out_timer;
33424 - atomic_long_t debug_stop_polling;
33425 - atomic_long_t debug_eqbs_all;
33426 - atomic_long_t debug_eqbs_incomplete;
33427 - atomic_long_t debug_sqbs_all;
33428 - atomic_long_t debug_sqbs_incomplete;
33429 + atomic_long_unchecked_t debug_tl_out_timer;
33430 + atomic_long_unchecked_t debug_stop_polling;
33431 + atomic_long_unchecked_t debug_eqbs_all;
33432 + atomic_long_unchecked_t debug_eqbs_incomplete;
33433 + atomic_long_unchecked_t debug_sqbs_all;
33434 + atomic_long_unchecked_t debug_sqbs_incomplete;
33435 };
33436
33437 extern struct qdio_perf_stats perf_stats;
33438 extern int qdio_performance_stats;
33439
33440 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
33441 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
33442 {
33443 if (qdio_performance_stats)
33444 - atomic_long_inc(count);
33445 + atomic_long_inc_unchecked(count);
33446 }
33447
33448 int qdio_setup_perf_stats(void);
33449 diff -urNp linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c
33450 --- linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
33451 +++ linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
33452 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
33453 u32 actual_fibsize64, actual_fibsize = 0;
33454 int i;
33455
33456 + pax_track_stack();
33457
33458 if (dev->in_reset) {
33459 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
33460 diff -urNp linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c
33461 --- linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
33462 +++ linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
33463 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
33464 flash_error_table[i].reason);
33465 }
33466
33467 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
33468 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
33469 asd_show_update_bios, asd_store_update_bios);
33470
33471 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
33472 diff -urNp linux-2.6.32.42/drivers/scsi/BusLogic.c linux-2.6.32.42/drivers/scsi/BusLogic.c
33473 --- linux-2.6.32.42/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
33474 +++ linux-2.6.32.42/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
33475 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
33476 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
33477 *PrototypeHostAdapter)
33478 {
33479 + pax_track_stack();
33480 +
33481 /*
33482 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
33483 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
33484 diff -urNp linux-2.6.32.42/drivers/scsi/dpt_i2o.c linux-2.6.32.42/drivers/scsi/dpt_i2o.c
33485 --- linux-2.6.32.42/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
33486 +++ linux-2.6.32.42/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
33487 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
33488 dma_addr_t addr;
33489 ulong flags = 0;
33490
33491 + pax_track_stack();
33492 +
33493 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
33494 // get user msg size in u32s
33495 if(get_user(size, &user_msg[0])){
33496 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
33497 s32 rcode;
33498 dma_addr_t addr;
33499
33500 + pax_track_stack();
33501 +
33502 memset(msg, 0 , sizeof(msg));
33503 len = scsi_bufflen(cmd);
33504 direction = 0x00000000;
33505 diff -urNp linux-2.6.32.42/drivers/scsi/eata.c linux-2.6.32.42/drivers/scsi/eata.c
33506 --- linux-2.6.32.42/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
33507 +++ linux-2.6.32.42/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
33508 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
33509 struct hostdata *ha;
33510 char name[16];
33511
33512 + pax_track_stack();
33513 +
33514 sprintf(name, "%s%d", driver_name, j);
33515
33516 if (!request_region(port_base, REGION_SIZE, driver_name)) {
33517 diff -urNp linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c
33518 --- linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
33519 +++ linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
33520 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
33521 size_t rlen;
33522 size_t dlen;
33523
33524 + pax_track_stack();
33525 +
33526 fiph = (struct fip_header *)skb->data;
33527 sub = fiph->fip_subcode;
33528 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
33529 diff -urNp linux-2.6.32.42/drivers/scsi/gdth.c linux-2.6.32.42/drivers/scsi/gdth.c
33530 --- linux-2.6.32.42/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
33531 +++ linux-2.6.32.42/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
33532 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
33533 ulong flags;
33534 gdth_ha_str *ha;
33535
33536 + pax_track_stack();
33537 +
33538 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
33539 return -EFAULT;
33540 ha = gdth_find_ha(ldrv.ionode);
33541 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
33542 gdth_ha_str *ha;
33543 int rval;
33544
33545 + pax_track_stack();
33546 +
33547 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
33548 res.number >= MAX_HDRIVES)
33549 return -EFAULT;
33550 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
33551 gdth_ha_str *ha;
33552 int rval;
33553
33554 + pax_track_stack();
33555 +
33556 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
33557 return -EFAULT;
33558 ha = gdth_find_ha(gen.ionode);
33559 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
33560 int i;
33561 gdth_cmd_str gdtcmd;
33562 char cmnd[MAX_COMMAND_SIZE];
33563 +
33564 + pax_track_stack();
33565 +
33566 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
33567
33568 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
33569 diff -urNp linux-2.6.32.42/drivers/scsi/gdth_proc.c linux-2.6.32.42/drivers/scsi/gdth_proc.c
33570 --- linux-2.6.32.42/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
33571 +++ linux-2.6.32.42/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
33572 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
33573 ulong64 paddr;
33574
33575 char cmnd[MAX_COMMAND_SIZE];
33576 +
33577 + pax_track_stack();
33578 +
33579 memset(cmnd, 0xff, 12);
33580 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
33581
33582 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
33583 gdth_hget_str *phg;
33584 char cmnd[MAX_COMMAND_SIZE];
33585
33586 + pax_track_stack();
33587 +
33588 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
33589 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
33590 if (!gdtcmd || !estr)
33591 diff -urNp linux-2.6.32.42/drivers/scsi/hosts.c linux-2.6.32.42/drivers/scsi/hosts.c
33592 --- linux-2.6.32.42/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
33593 +++ linux-2.6.32.42/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
33594 @@ -40,7 +40,7 @@
33595 #include "scsi_logging.h"
33596
33597
33598 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
33599 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
33600
33601
33602 static void scsi_host_cls_release(struct device *dev)
33603 @@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
33604 * subtract one because we increment first then return, but we need to
33605 * know what the next host number was before increment
33606 */
33607 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
33608 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
33609 shost->dma_channel = 0xff;
33610
33611 /* These three are default values which can be overridden */
33612 diff -urNp linux-2.6.32.42/drivers/scsi/ipr.c linux-2.6.32.42/drivers/scsi/ipr.c
33613 --- linux-2.6.32.42/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
33614 +++ linux-2.6.32.42/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
33615 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
33616 return true;
33617 }
33618
33619 -static struct ata_port_operations ipr_sata_ops = {
33620 +static const struct ata_port_operations ipr_sata_ops = {
33621 .phy_reset = ipr_ata_phy_reset,
33622 .hardreset = ipr_sata_reset,
33623 .post_internal_cmd = ipr_ata_post_internal,
33624 diff -urNp linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c
33625 --- linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
33626 +++ linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c 2011-04-17 15:56:46.000000000 -0400
33627 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
33628 * all together if not used XXX
33629 */
33630 struct {
33631 - atomic_t no_free_exch;
33632 - atomic_t no_free_exch_xid;
33633 - atomic_t xid_not_found;
33634 - atomic_t xid_busy;
33635 - atomic_t seq_not_found;
33636 - atomic_t non_bls_resp;
33637 + atomic_unchecked_t no_free_exch;
33638 + atomic_unchecked_t no_free_exch_xid;
33639 + atomic_unchecked_t xid_not_found;
33640 + atomic_unchecked_t xid_busy;
33641 + atomic_unchecked_t seq_not_found;
33642 + atomic_unchecked_t non_bls_resp;
33643 } stats;
33644 };
33645 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
33646 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
33647 /* allocate memory for exchange */
33648 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
33649 if (!ep) {
33650 - atomic_inc(&mp->stats.no_free_exch);
33651 + atomic_inc_unchecked(&mp->stats.no_free_exch);
33652 goto out;
33653 }
33654 memset(ep, 0, sizeof(*ep));
33655 @@ -557,7 +557,7 @@ out:
33656 return ep;
33657 err:
33658 spin_unlock_bh(&pool->lock);
33659 - atomic_inc(&mp->stats.no_free_exch_xid);
33660 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
33661 mempool_free(ep, mp->ep_pool);
33662 return NULL;
33663 }
33664 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33665 xid = ntohs(fh->fh_ox_id); /* we originated exch */
33666 ep = fc_exch_find(mp, xid);
33667 if (!ep) {
33668 - atomic_inc(&mp->stats.xid_not_found);
33669 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33670 reject = FC_RJT_OX_ID;
33671 goto out;
33672 }
33673 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33674 ep = fc_exch_find(mp, xid);
33675 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
33676 if (ep) {
33677 - atomic_inc(&mp->stats.xid_busy);
33678 + atomic_inc_unchecked(&mp->stats.xid_busy);
33679 reject = FC_RJT_RX_ID;
33680 goto rel;
33681 }
33682 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33683 }
33684 xid = ep->xid; /* get our XID */
33685 } else if (!ep) {
33686 - atomic_inc(&mp->stats.xid_not_found);
33687 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33688 reject = FC_RJT_RX_ID; /* XID not found */
33689 goto out;
33690 }
33691 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33692 } else {
33693 sp = &ep->seq;
33694 if (sp->id != fh->fh_seq_id) {
33695 - atomic_inc(&mp->stats.seq_not_found);
33696 + atomic_inc_unchecked(&mp->stats.seq_not_found);
33697 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
33698 goto rel;
33699 }
33700 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
33701
33702 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
33703 if (!ep) {
33704 - atomic_inc(&mp->stats.xid_not_found);
33705 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33706 goto out;
33707 }
33708 if (ep->esb_stat & ESB_ST_COMPLETE) {
33709 - atomic_inc(&mp->stats.xid_not_found);
33710 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33711 goto out;
33712 }
33713 if (ep->rxid == FC_XID_UNKNOWN)
33714 ep->rxid = ntohs(fh->fh_rx_id);
33715 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
33716 - atomic_inc(&mp->stats.xid_not_found);
33717 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33718 goto rel;
33719 }
33720 if (ep->did != ntoh24(fh->fh_s_id) &&
33721 ep->did != FC_FID_FLOGI) {
33722 - atomic_inc(&mp->stats.xid_not_found);
33723 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33724 goto rel;
33725 }
33726 sof = fr_sof(fp);
33727 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
33728 } else {
33729 sp = &ep->seq;
33730 if (sp->id != fh->fh_seq_id) {
33731 - atomic_inc(&mp->stats.seq_not_found);
33732 + atomic_inc_unchecked(&mp->stats.seq_not_found);
33733 goto rel;
33734 }
33735 }
33736 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
33737 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
33738
33739 if (!sp)
33740 - atomic_inc(&mp->stats.xid_not_found);
33741 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33742 else
33743 - atomic_inc(&mp->stats.non_bls_resp);
33744 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
33745
33746 fc_frame_free(fp);
33747 }
33748 diff -urNp linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c
33749 --- linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
33750 +++ linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
33751 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
33752 }
33753 }
33754
33755 -static struct ata_port_operations sas_sata_ops = {
33756 +static const struct ata_port_operations sas_sata_ops = {
33757 .phy_reset = sas_ata_phy_reset,
33758 .post_internal_cmd = sas_ata_post_internal,
33759 .qc_defer = ata_std_qc_defer,
33760 diff -urNp linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c
33761 --- linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
33762 +++ linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
33763 @@ -124,7 +124,7 @@ struct lpfc_debug {
33764 int len;
33765 };
33766
33767 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33768 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33769 static unsigned long lpfc_debugfs_start_time = 0L;
33770
33771 /**
33772 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
33773 lpfc_debugfs_enable = 0;
33774
33775 len = 0;
33776 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
33777 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
33778 (lpfc_debugfs_max_disc_trc - 1);
33779 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
33780 dtp = vport->disc_trc + i;
33781 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
33782 lpfc_debugfs_enable = 0;
33783
33784 len = 0;
33785 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
33786 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
33787 (lpfc_debugfs_max_slow_ring_trc - 1);
33788 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
33789 dtp = phba->slow_ring_trc + i;
33790 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
33791 uint32_t *ptr;
33792 char buffer[1024];
33793
33794 + pax_track_stack();
33795 +
33796 off = 0;
33797 spin_lock_irq(&phba->hbalock);
33798
33799 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
33800 !vport || !vport->disc_trc)
33801 return;
33802
33803 - index = atomic_inc_return(&vport->disc_trc_cnt) &
33804 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
33805 (lpfc_debugfs_max_disc_trc - 1);
33806 dtp = vport->disc_trc + index;
33807 dtp->fmt = fmt;
33808 dtp->data1 = data1;
33809 dtp->data2 = data2;
33810 dtp->data3 = data3;
33811 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33812 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33813 dtp->jif = jiffies;
33814 #endif
33815 return;
33816 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
33817 !phba || !phba->slow_ring_trc)
33818 return;
33819
33820 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
33821 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
33822 (lpfc_debugfs_max_slow_ring_trc - 1);
33823 dtp = phba->slow_ring_trc + index;
33824 dtp->fmt = fmt;
33825 dtp->data1 = data1;
33826 dtp->data2 = data2;
33827 dtp->data3 = data3;
33828 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33829 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33830 dtp->jif = jiffies;
33831 #endif
33832 return;
33833 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33834 "slow_ring buffer\n");
33835 goto debug_failed;
33836 }
33837 - atomic_set(&phba->slow_ring_trc_cnt, 0);
33838 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
33839 memset(phba->slow_ring_trc, 0,
33840 (sizeof(struct lpfc_debugfs_trc) *
33841 lpfc_debugfs_max_slow_ring_trc));
33842 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33843 "buffer\n");
33844 goto debug_failed;
33845 }
33846 - atomic_set(&vport->disc_trc_cnt, 0);
33847 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
33848
33849 snprintf(name, sizeof(name), "discovery_trace");
33850 vport->debug_disc_trc =
33851 diff -urNp linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h
33852 --- linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
33853 +++ linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
33854 @@ -400,7 +400,7 @@ struct lpfc_vport {
33855 struct dentry *debug_nodelist;
33856 struct dentry *vport_debugfs_root;
33857 struct lpfc_debugfs_trc *disc_trc;
33858 - atomic_t disc_trc_cnt;
33859 + atomic_unchecked_t disc_trc_cnt;
33860 #endif
33861 uint8_t stat_data_enabled;
33862 uint8_t stat_data_blocked;
33863 @@ -725,8 +725,8 @@ struct lpfc_hba {
33864 struct timer_list fabric_block_timer;
33865 unsigned long bit_flags;
33866 #define FABRIC_COMANDS_BLOCKED 0
33867 - atomic_t num_rsrc_err;
33868 - atomic_t num_cmd_success;
33869 + atomic_unchecked_t num_rsrc_err;
33870 + atomic_unchecked_t num_cmd_success;
33871 unsigned long last_rsrc_error_time;
33872 unsigned long last_ramp_down_time;
33873 unsigned long last_ramp_up_time;
33874 @@ -740,7 +740,7 @@ struct lpfc_hba {
33875 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
33876 struct dentry *debug_slow_ring_trc;
33877 struct lpfc_debugfs_trc *slow_ring_trc;
33878 - atomic_t slow_ring_trc_cnt;
33879 + atomic_unchecked_t slow_ring_trc_cnt;
33880 #endif
33881
33882 /* Used for deferred freeing of ELS data buffers */
33883 diff -urNp linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c
33884 --- linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
33885 +++ linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
33886 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
33887 uint32_t evt_posted;
33888
33889 spin_lock_irqsave(&phba->hbalock, flags);
33890 - atomic_inc(&phba->num_rsrc_err);
33891 + atomic_inc_unchecked(&phba->num_rsrc_err);
33892 phba->last_rsrc_error_time = jiffies;
33893
33894 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
33895 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
33896 unsigned long flags;
33897 struct lpfc_hba *phba = vport->phba;
33898 uint32_t evt_posted;
33899 - atomic_inc(&phba->num_cmd_success);
33900 + atomic_inc_unchecked(&phba->num_cmd_success);
33901
33902 if (vport->cfg_lun_queue_depth <= queue_depth)
33903 return;
33904 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33905 int i;
33906 struct lpfc_rport_data *rdata;
33907
33908 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
33909 - num_cmd_success = atomic_read(&phba->num_cmd_success);
33910 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
33911 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
33912
33913 vports = lpfc_create_vport_work_array(phba);
33914 if (vports != NULL)
33915 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33916 }
33917 }
33918 lpfc_destroy_vport_work_array(phba, vports);
33919 - atomic_set(&phba->num_rsrc_err, 0);
33920 - atomic_set(&phba->num_cmd_success, 0);
33921 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
33922 + atomic_set_unchecked(&phba->num_cmd_success, 0);
33923 }
33924
33925 /**
33926 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
33927 }
33928 }
33929 lpfc_destroy_vport_work_array(phba, vports);
33930 - atomic_set(&phba->num_rsrc_err, 0);
33931 - atomic_set(&phba->num_cmd_success, 0);
33932 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
33933 + atomic_set_unchecked(&phba->num_cmd_success, 0);
33934 }
33935
33936 /**
33937 diff -urNp linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c
33938 --- linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
33939 +++ linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
33940 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
33941 int rval;
33942 int i;
33943
33944 + pax_track_stack();
33945 +
33946 // Allocate memory for the base list of scb for management module.
33947 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
33948
33949 diff -urNp linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c
33950 --- linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
33951 +++ linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
33952 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
33953 int nelem = ARRAY_SIZE(get_attrs), a = 0;
33954 int ret;
33955
33956 + pax_track_stack();
33957 +
33958 or = osd_start_request(od, GFP_KERNEL);
33959 if (!or)
33960 return -ENOMEM;
33961 diff -urNp linux-2.6.32.42/drivers/scsi/pmcraid.c linux-2.6.32.42/drivers/scsi/pmcraid.c
33962 --- linux-2.6.32.42/drivers/scsi/pmcraid.c 2011-05-10 22:12:01.000000000 -0400
33963 +++ linux-2.6.32.42/drivers/scsi/pmcraid.c 2011-05-10 22:12:33.000000000 -0400
33964 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
33965 res->scsi_dev = scsi_dev;
33966 scsi_dev->hostdata = res;
33967 res->change_detected = 0;
33968 - atomic_set(&res->read_failures, 0);
33969 - atomic_set(&res->write_failures, 0);
33970 + atomic_set_unchecked(&res->read_failures, 0);
33971 + atomic_set_unchecked(&res->write_failures, 0);
33972 rc = 0;
33973 }
33974 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
33975 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
33976
33977 /* If this was a SCSI read/write command keep count of errors */
33978 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
33979 - atomic_inc(&res->read_failures);
33980 + atomic_inc_unchecked(&res->read_failures);
33981 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
33982 - atomic_inc(&res->write_failures);
33983 + atomic_inc_unchecked(&res->write_failures);
33984
33985 if (!RES_IS_GSCSI(res->cfg_entry) &&
33986 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
33987 @@ -4113,7 +4113,7 @@ static void pmcraid_worker_function(stru
33988
33989 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
33990 /* add resources only after host is added into system */
33991 - if (!atomic_read(&pinstance->expose_resources))
33992 + if (!atomic_read_unchecked(&pinstance->expose_resources))
33993 return;
33994
33995 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
33996 @@ -4847,7 +4847,7 @@ static int __devinit pmcraid_init_instan
33997 init_waitqueue_head(&pinstance->reset_wait_q);
33998
33999 atomic_set(&pinstance->outstanding_cmds, 0);
34000 - atomic_set(&pinstance->expose_resources, 0);
34001 + atomic_set_unchecked(&pinstance->expose_resources, 0);
34002
34003 INIT_LIST_HEAD(&pinstance->free_res_q);
34004 INIT_LIST_HEAD(&pinstance->used_res_q);
34005 @@ -5499,7 +5499,7 @@ static int __devinit pmcraid_probe(
34006 /* Schedule worker thread to handle CCN and take care of adding and
34007 * removing devices to OS
34008 */
34009 - atomic_set(&pinstance->expose_resources, 1);
34010 + atomic_set_unchecked(&pinstance->expose_resources, 1);
34011 schedule_work(&pinstance->worker_q);
34012 return rc;
34013
34014 diff -urNp linux-2.6.32.42/drivers/scsi/pmcraid.h linux-2.6.32.42/drivers/scsi/pmcraid.h
34015 --- linux-2.6.32.42/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
34016 +++ linux-2.6.32.42/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
34017 @@ -690,7 +690,7 @@ struct pmcraid_instance {
34018 atomic_t outstanding_cmds;
34019
34020 /* should add/delete resources to mid-layer now ?*/
34021 - atomic_t expose_resources;
34022 + atomic_unchecked_t expose_resources;
34023
34024 /* Tasklet to handle deferred processing */
34025 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
34026 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
34027 struct list_head queue; /* link to "to be exposed" resources */
34028 struct pmcraid_config_table_entry cfg_entry;
34029 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34030 - atomic_t read_failures; /* count of failed READ commands */
34031 - atomic_t write_failures; /* count of failed WRITE commands */
34032 + atomic_unchecked_t read_failures; /* count of failed READ commands */
34033 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34034
34035 /* To indicate add/delete/modify during CCN */
34036 u8 change_detected;
34037 diff -urNp linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h
34038 --- linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
34039 +++ linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
34040 @@ -240,7 +240,7 @@ struct ddb_entry {
34041 atomic_t retry_relogin_timer; /* Min Time between relogins
34042 * (4000 only) */
34043 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
34044 - atomic_t relogin_retry_count; /* Num of times relogin has been
34045 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34046 * retried */
34047
34048 uint16_t port;
34049 diff -urNp linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c
34050 --- linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
34051 +++ linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
34052 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
34053 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
34054 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34055 atomic_set(&ddb_entry->relogin_timer, 0);
34056 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34057 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34058 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34059 list_add_tail(&ddb_entry->list, &ha->ddb_list);
34060 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
34061 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
34062 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34063 atomic_set(&ddb_entry->port_down_timer,
34064 ha->port_down_retry_count);
34065 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34066 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34067 atomic_set(&ddb_entry->relogin_timer, 0);
34068 clear_bit(DF_RELOGIN, &ddb_entry->flags);
34069 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
34070 diff -urNp linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c
34071 --- linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
34072 +++ linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
34073 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
34074 ddb_entry->fw_ddb_device_state ==
34075 DDB_DS_SESSION_FAILED) {
34076 /* Reset retry relogin timer */
34077 - atomic_inc(&ddb_entry->relogin_retry_count);
34078 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34079 DEBUG2(printk("scsi%ld: index[%d] relogin"
34080 " timed out-retrying"
34081 " relogin (%d)\n",
34082 ha->host_no,
34083 ddb_entry->fw_ddb_index,
34084 - atomic_read(&ddb_entry->
34085 + atomic_read_unchecked(&ddb_entry->
34086 relogin_retry_count))
34087 );
34088 start_dpc++;
34089 diff -urNp linux-2.6.32.42/drivers/scsi/scsi.c linux-2.6.32.42/drivers/scsi/scsi.c
34090 --- linux-2.6.32.42/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
34091 +++ linux-2.6.32.42/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
34092 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
34093 unsigned long timeout;
34094 int rtn = 0;
34095
34096 - atomic_inc(&cmd->device->iorequest_cnt);
34097 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34098
34099 /* check if the device is still usable */
34100 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34101 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_debug.c linux-2.6.32.42/drivers/scsi/scsi_debug.c
34102 --- linux-2.6.32.42/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
34103 +++ linux-2.6.32.42/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
34104 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
34105 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
34106 unsigned char *cmd = (unsigned char *)scp->cmnd;
34107
34108 + pax_track_stack();
34109 +
34110 if ((errsts = check_readiness(scp, 1, devip)))
34111 return errsts;
34112 memset(arr, 0, sizeof(arr));
34113 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
34114 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
34115 unsigned char *cmd = (unsigned char *)scp->cmnd;
34116
34117 + pax_track_stack();
34118 +
34119 if ((errsts = check_readiness(scp, 1, devip)))
34120 return errsts;
34121 memset(arr, 0, sizeof(arr));
34122 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_lib.c linux-2.6.32.42/drivers/scsi/scsi_lib.c
34123 --- linux-2.6.32.42/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
34124 +++ linux-2.6.32.42/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
34125 @@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
34126
34127 scsi_init_cmd_errh(cmd);
34128 cmd->result = DID_NO_CONNECT << 16;
34129 - atomic_inc(&cmd->device->iorequest_cnt);
34130 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34131
34132 /*
34133 * SCSI request completion path will do scsi_device_unbusy(),
34134 @@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
34135 */
34136 cmd->serial_number = 0;
34137
34138 - atomic_inc(&cmd->device->iodone_cnt);
34139 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
34140 if (cmd->result)
34141 - atomic_inc(&cmd->device->ioerr_cnt);
34142 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34143
34144 disposition = scsi_decide_disposition(cmd);
34145 if (disposition != SUCCESS &&
34146 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_sysfs.c linux-2.6.32.42/drivers/scsi/scsi_sysfs.c
34147 --- linux-2.6.32.42/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
34148 +++ linux-2.6.32.42/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
34149 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
34150 char *buf) \
34151 { \
34152 struct scsi_device *sdev = to_scsi_device(dev); \
34153 - unsigned long long count = atomic_read(&sdev->field); \
34154 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
34155 return snprintf(buf, 20, "0x%llx\n", count); \
34156 } \
34157 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34158 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c
34159 --- linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
34160 +++ linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
34161 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
34162 * Netlink Infrastructure
34163 */
34164
34165 -static atomic_t fc_event_seq;
34166 +static atomic_unchecked_t fc_event_seq;
34167
34168 /**
34169 * fc_get_event_number - Obtain the next sequential FC event number
34170 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
34171 u32
34172 fc_get_event_number(void)
34173 {
34174 - return atomic_add_return(1, &fc_event_seq);
34175 + return atomic_add_return_unchecked(1, &fc_event_seq);
34176 }
34177 EXPORT_SYMBOL(fc_get_event_number);
34178
34179 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void
34180 {
34181 int error;
34182
34183 - atomic_set(&fc_event_seq, 0);
34184 + atomic_set_unchecked(&fc_event_seq, 0);
34185
34186 error = transport_class_register(&fc_host_class);
34187 if (error)
34188 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c
34189 --- linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
34190 +++ linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
34191 @@ -81,7 +81,7 @@ struct iscsi_internal {
34192 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34193 };
34194
34195 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34196 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34197 static struct workqueue_struct *iscsi_eh_timer_workq;
34198
34199 /*
34200 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
34201 int err;
34202
34203 ihost = shost->shost_data;
34204 - session->sid = atomic_add_return(1, &iscsi_session_nr);
34205 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34206
34207 if (id == ISCSI_MAX_TARGET) {
34208 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34209 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
34210 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34211 ISCSI_TRANSPORT_VERSION);
34212
34213 - atomic_set(&iscsi_session_nr, 0);
34214 + atomic_set_unchecked(&iscsi_session_nr, 0);
34215
34216 err = class_register(&iscsi_transport_class);
34217 if (err)
34218 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c
34219 --- linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
34220 +++ linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
34221 @@ -33,7 +33,7 @@
34222 #include "scsi_transport_srp_internal.h"
34223
34224 struct srp_host_attrs {
34225 - atomic_t next_port_id;
34226 + atomic_unchecked_t next_port_id;
34227 };
34228 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34229
34230 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34231 struct Scsi_Host *shost = dev_to_shost(dev);
34232 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34233
34234 - atomic_set(&srp_host->next_port_id, 0);
34235 + atomic_set_unchecked(&srp_host->next_port_id, 0);
34236 return 0;
34237 }
34238
34239 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34240 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34241 rport->roles = ids->roles;
34242
34243 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34244 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34245 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34246
34247 transport_setup_device(&rport->dev);
34248 diff -urNp linux-2.6.32.42/drivers/scsi/sg.c linux-2.6.32.42/drivers/scsi/sg.c
34249 --- linux-2.6.32.42/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
34250 +++ linux-2.6.32.42/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
34251 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
34252 const struct file_operations * fops;
34253 };
34254
34255 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34256 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34257 {"allow_dio", &adio_fops},
34258 {"debug", &debug_fops},
34259 {"def_reserved_size", &dressz_fops},
34260 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
34261 {
34262 int k, mask;
34263 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34264 - struct sg_proc_leaf * leaf;
34265 + const struct sg_proc_leaf * leaf;
34266
34267 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34268 if (!sg_proc_sgp)
34269 diff -urNp linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c
34270 --- linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
34271 +++ linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
34272 @@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
34273 int do_iounmap = 0;
34274 int do_disable_device = 1;
34275
34276 + pax_track_stack();
34277 +
34278 memset(&sym_dev, 0, sizeof(sym_dev));
34279 memset(&nvram, 0, sizeof(nvram));
34280 sym_dev.pdev = pdev;
34281 diff -urNp linux-2.6.32.42/drivers/serial/kgdboc.c linux-2.6.32.42/drivers/serial/kgdboc.c
34282 --- linux-2.6.32.42/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
34283 +++ linux-2.6.32.42/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
34284 @@ -18,7 +18,7 @@
34285
34286 #define MAX_CONFIG_LEN 40
34287
34288 -static struct kgdb_io kgdboc_io_ops;
34289 +static const struct kgdb_io kgdboc_io_ops;
34290
34291 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
34292 static int configured = -1;
34293 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
34294 module_put(THIS_MODULE);
34295 }
34296
34297 -static struct kgdb_io kgdboc_io_ops = {
34298 +static const struct kgdb_io kgdboc_io_ops = {
34299 .name = "kgdboc",
34300 .read_char = kgdboc_get_char,
34301 .write_char = kgdboc_put_char,
34302 diff -urNp linux-2.6.32.42/drivers/spi/spi.c linux-2.6.32.42/drivers/spi/spi.c
34303 --- linux-2.6.32.42/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
34304 +++ linux-2.6.32.42/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
34305 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
34306 EXPORT_SYMBOL_GPL(spi_sync);
34307
34308 /* portable code must never pass more than 32 bytes */
34309 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34310 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
34311
34312 static u8 *buf;
34313
34314 diff -urNp linux-2.6.32.42/drivers/staging/android/binder.c linux-2.6.32.42/drivers/staging/android/binder.c
34315 --- linux-2.6.32.42/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
34316 +++ linux-2.6.32.42/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
34317 @@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
34318 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
34319 }
34320
34321 -static struct vm_operations_struct binder_vm_ops = {
34322 +static const struct vm_operations_struct binder_vm_ops = {
34323 .open = binder_vma_open,
34324 .close = binder_vma_close,
34325 };
34326 diff -urNp linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c
34327 --- linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
34328 +++ linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
34329 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
34330 return VM_FAULT_NOPAGE;
34331 }
34332
34333 -static struct vm_operations_struct b3dfg_vm_ops = {
34334 +static const struct vm_operations_struct b3dfg_vm_ops = {
34335 .fault = b3dfg_vma_fault,
34336 };
34337
34338 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
34339 return r;
34340 }
34341
34342 -static struct file_operations b3dfg_fops = {
34343 +static const struct file_operations b3dfg_fops = {
34344 .owner = THIS_MODULE,
34345 .open = b3dfg_open,
34346 .release = b3dfg_release,
34347 diff -urNp linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c
34348 --- linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c 2011-03-27 14:31:47.000000000 -0400
34349 +++ linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c 2011-04-17 15:56:46.000000000 -0400
34350 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
34351 mutex_unlock(&dev->mutex);
34352 }
34353
34354 -static struct vm_operations_struct comedi_vm_ops = {
34355 +static const struct vm_operations_struct comedi_vm_ops = {
34356 .close = comedi_unmap,
34357 };
34358
34359 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c
34360 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
34361 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
34362 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
34363 static dev_t adsp_devno;
34364 static struct class *adsp_class;
34365
34366 -static struct file_operations adsp_fops = {
34367 +static const struct file_operations adsp_fops = {
34368 .owner = THIS_MODULE,
34369 .open = adsp_open,
34370 .unlocked_ioctl = adsp_ioctl,
34371 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c
34372 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
34373 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
34374 @@ -1022,7 +1022,7 @@ done:
34375 return rc;
34376 }
34377
34378 -static struct file_operations audio_aac_fops = {
34379 +static const struct file_operations audio_aac_fops = {
34380 .owner = THIS_MODULE,
34381 .open = audio_open,
34382 .release = audio_release,
34383 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c
34384 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
34385 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
34386 @@ -833,7 +833,7 @@ done:
34387 return rc;
34388 }
34389
34390 -static struct file_operations audio_amrnb_fops = {
34391 +static const struct file_operations audio_amrnb_fops = {
34392 .owner = THIS_MODULE,
34393 .open = audamrnb_open,
34394 .release = audamrnb_release,
34395 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c
34396 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
34397 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
34398 @@ -805,7 +805,7 @@ dma_fail:
34399 return rc;
34400 }
34401
34402 -static struct file_operations audio_evrc_fops = {
34403 +static const struct file_operations audio_evrc_fops = {
34404 .owner = THIS_MODULE,
34405 .open = audevrc_open,
34406 .release = audevrc_release,
34407 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c
34408 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
34409 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
34410 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
34411 return 0;
34412 }
34413
34414 -static struct file_operations audio_fops = {
34415 +static const struct file_operations audio_fops = {
34416 .owner = THIS_MODULE,
34417 .open = audio_in_open,
34418 .release = audio_in_release,
34419 @@ -922,7 +922,7 @@ static struct file_operations audio_fops
34420 .unlocked_ioctl = audio_in_ioctl,
34421 };
34422
34423 -static struct file_operations audpre_fops = {
34424 +static const struct file_operations audpre_fops = {
34425 .owner = THIS_MODULE,
34426 .open = audpre_open,
34427 .unlocked_ioctl = audpre_ioctl,
34428 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c
34429 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
34430 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
34431 @@ -941,7 +941,7 @@ done:
34432 return rc;
34433 }
34434
34435 -static struct file_operations audio_mp3_fops = {
34436 +static const struct file_operations audio_mp3_fops = {
34437 .owner = THIS_MODULE,
34438 .open = audio_open,
34439 .release = audio_release,
34440 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c
34441 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
34442 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
34443 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
34444 return 0;
34445 }
34446
34447 -static struct file_operations audio_fops = {
34448 +static const struct file_operations audio_fops = {
34449 .owner = THIS_MODULE,
34450 .open = audio_open,
34451 .release = audio_release,
34452 @@ -819,7 +819,7 @@ static struct file_operations audio_fops
34453 .unlocked_ioctl = audio_ioctl,
34454 };
34455
34456 -static struct file_operations audpp_fops = {
34457 +static const struct file_operations audpp_fops = {
34458 .owner = THIS_MODULE,
34459 .open = audpp_open,
34460 .unlocked_ioctl = audpp_ioctl,
34461 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c
34462 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
34463 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
34464 @@ -816,7 +816,7 @@ err:
34465 return rc;
34466 }
34467
34468 -static struct file_operations audio_qcelp_fops = {
34469 +static const struct file_operations audio_qcelp_fops = {
34470 .owner = THIS_MODULE,
34471 .open = audqcelp_open,
34472 .release = audqcelp_release,
34473 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c
34474 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
34475 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
34476 @@ -242,7 +242,7 @@ err:
34477 return rc;
34478 }
34479
34480 -static struct file_operations snd_fops = {
34481 +static const struct file_operations snd_fops = {
34482 .owner = THIS_MODULE,
34483 .open = snd_open,
34484 .release = snd_release,
34485 diff -urNp linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c
34486 --- linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
34487 +++ linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
34488 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
34489 return 0;
34490 }
34491
34492 -static struct file_operations qmi_fops = {
34493 +static const struct file_operations qmi_fops = {
34494 .owner = THIS_MODULE,
34495 .read = qmi_read,
34496 .write = qmi_write,
34497 diff -urNp linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c
34498 --- linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
34499 +++ linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
34500 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
34501 return rc;
34502 }
34503
34504 -static struct file_operations rpcrouter_server_fops = {
34505 +static const struct file_operations rpcrouter_server_fops = {
34506 .owner = THIS_MODULE,
34507 .open = rpcrouter_open,
34508 .release = rpcrouter_release,
34509 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
34510 .unlocked_ioctl = rpcrouter_ioctl,
34511 };
34512
34513 -static struct file_operations rpcrouter_router_fops = {
34514 +static const struct file_operations rpcrouter_router_fops = {
34515 .owner = THIS_MODULE,
34516 .open = rpcrouter_open,
34517 .release = rpcrouter_release,
34518 diff -urNp linux-2.6.32.42/drivers/staging/dst/dcore.c linux-2.6.32.42/drivers/staging/dst/dcore.c
34519 --- linux-2.6.32.42/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
34520 +++ linux-2.6.32.42/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
34521 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
34522 return 0;
34523 }
34524
34525 -static struct block_device_operations dst_blk_ops = {
34526 +static const struct block_device_operations dst_blk_ops = {
34527 .open = dst_bdev_open,
34528 .release = dst_bdev_release,
34529 .owner = THIS_MODULE,
34530 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
34531 n->size = ctl->size;
34532
34533 atomic_set(&n->refcnt, 1);
34534 - atomic_long_set(&n->gen, 0);
34535 + atomic_long_set_unchecked(&n->gen, 0);
34536 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
34537
34538 err = dst_node_sysfs_init(n);
34539 diff -urNp linux-2.6.32.42/drivers/staging/dst/trans.c linux-2.6.32.42/drivers/staging/dst/trans.c
34540 --- linux-2.6.32.42/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
34541 +++ linux-2.6.32.42/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
34542 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
34543 t->error = 0;
34544 t->retries = 0;
34545 atomic_set(&t->refcnt, 1);
34546 - t->gen = atomic_long_inc_return(&n->gen);
34547 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
34548
34549 t->enc = bio_data_dir(bio);
34550 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
34551 diff -urNp linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c
34552 --- linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
34553 +++ linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
34554 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
34555 struct net_device_stats *stats = &etdev->net_stats;
34556
34557 if (pMpTcb->Flags & fMP_DEST_BROAD)
34558 - atomic_inc(&etdev->Stats.brdcstxmt);
34559 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
34560 else if (pMpTcb->Flags & fMP_DEST_MULTI)
34561 - atomic_inc(&etdev->Stats.multixmt);
34562 + atomic_inc_unchecked(&etdev->Stats.multixmt);
34563 else
34564 - atomic_inc(&etdev->Stats.unixmt);
34565 + atomic_inc_unchecked(&etdev->Stats.unixmt);
34566
34567 if (pMpTcb->Packet) {
34568 stats->tx_bytes += pMpTcb->Packet->len;
34569 diff -urNp linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h
34570 --- linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
34571 +++ linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
34572 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
34573 * operations
34574 */
34575 u32 unircv; /* # multicast packets received */
34576 - atomic_t unixmt; /* # multicast packets for Tx */
34577 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
34578 u32 multircv; /* # multicast packets received */
34579 - atomic_t multixmt; /* # multicast packets for Tx */
34580 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
34581 u32 brdcstrcv; /* # broadcast packets received */
34582 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
34583 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
34584 u32 norcvbuf; /* # Rx packets discarded */
34585 u32 noxmtbuf; /* # Tx packets discarded */
34586
34587 diff -urNp linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c
34588 --- linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
34589 +++ linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
34590 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
34591 return 0;
34592 }
34593
34594 -static struct vm_operations_struct go7007_vm_ops = {
34595 +static const struct vm_operations_struct go7007_vm_ops = {
34596 .open = go7007_vm_open,
34597 .close = go7007_vm_close,
34598 .fault = go7007_vm_fault,
34599 diff -urNp linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c
34600 --- linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
34601 +++ linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
34602 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
34603 /* The one and only one */
34604 static struct blkvsc_driver_context g_blkvsc_drv;
34605
34606 -static struct block_device_operations block_ops = {
34607 +static const struct block_device_operations block_ops = {
34608 .owner = THIS_MODULE,
34609 .open = blkvsc_open,
34610 .release = blkvsc_release,
34611 diff -urNp linux-2.6.32.42/drivers/staging/hv/Channel.c linux-2.6.32.42/drivers/staging/hv/Channel.c
34612 --- linux-2.6.32.42/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
34613 +++ linux-2.6.32.42/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
34614 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
34615
34616 DPRINT_ENTER(VMBUS);
34617
34618 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
34619 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
34620 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
34621 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
34622
34623 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
34624 ASSERT(msgInfo != NULL);
34625 diff -urNp linux-2.6.32.42/drivers/staging/hv/Hv.c linux-2.6.32.42/drivers/staging/hv/Hv.c
34626 --- linux-2.6.32.42/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
34627 +++ linux-2.6.32.42/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
34628 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
34629 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
34630 u32 outputAddressHi = outputAddress >> 32;
34631 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
34632 - volatile void *hypercallPage = gHvContext.HypercallPage;
34633 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
34634
34635 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
34636 Control, Input, Output);
34637 diff -urNp linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c
34638 --- linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
34639 +++ linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
34640 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
34641 to_device_context(root_device_obj);
34642 struct device_context *child_device_ctx =
34643 to_device_context(child_device_obj);
34644 - static atomic_t device_num = ATOMIC_INIT(0);
34645 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
34646
34647 DPRINT_ENTER(VMBUS_DRV);
34648
34649 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
34650
34651 /* Set the device name. Otherwise, device_register() will fail. */
34652 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
34653 - atomic_inc_return(&device_num));
34654 + atomic_inc_return_unchecked(&device_num));
34655
34656 /* The new device belongs to this bus */
34657 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
34658 diff -urNp linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h
34659 --- linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
34660 +++ linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
34661 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
34662 struct VMBUS_CONNECTION {
34663 enum VMBUS_CONNECT_STATE ConnectState;
34664
34665 - atomic_t NextGpadlHandle;
34666 + atomic_unchecked_t NextGpadlHandle;
34667
34668 /*
34669 * Represents channel interrupts. Each bit position represents a
34670 diff -urNp linux-2.6.32.42/drivers/staging/octeon/ethernet.c linux-2.6.32.42/drivers/staging/octeon/ethernet.c
34671 --- linux-2.6.32.42/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
34672 +++ linux-2.6.32.42/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
34673 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
34674 * since the RX tasklet also increments it.
34675 */
34676 #ifdef CONFIG_64BIT
34677 - atomic64_add(rx_status.dropped_packets,
34678 - (atomic64_t *)&priv->stats.rx_dropped);
34679 + atomic64_add_unchecked(rx_status.dropped_packets,
34680 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34681 #else
34682 - atomic_add(rx_status.dropped_packets,
34683 - (atomic_t *)&priv->stats.rx_dropped);
34684 + atomic_add_unchecked(rx_status.dropped_packets,
34685 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
34686 #endif
34687 }
34688
34689 diff -urNp linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c
34690 --- linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
34691 +++ linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
34692 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
34693 /* Increment RX stats for virtual ports */
34694 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
34695 #ifdef CONFIG_64BIT
34696 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
34697 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
34698 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
34699 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
34700 #else
34701 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
34702 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
34703 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
34704 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
34705 #endif
34706 }
34707 netif_receive_skb(skb);
34708 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
34709 dev->name);
34710 */
34711 #ifdef CONFIG_64BIT
34712 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
34713 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
34714 #else
34715 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
34716 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
34717 #endif
34718 dev_kfree_skb_irq(skb);
34719 }
34720 diff -urNp linux-2.6.32.42/drivers/staging/panel/panel.c linux-2.6.32.42/drivers/staging/panel/panel.c
34721 --- linux-2.6.32.42/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
34722 +++ linux-2.6.32.42/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
34723 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
34724 return 0;
34725 }
34726
34727 -static struct file_operations lcd_fops = {
34728 +static const struct file_operations lcd_fops = {
34729 .write = lcd_write,
34730 .open = lcd_open,
34731 .release = lcd_release,
34732 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
34733 return 0;
34734 }
34735
34736 -static struct file_operations keypad_fops = {
34737 +static const struct file_operations keypad_fops = {
34738 .read = keypad_read, /* read */
34739 .open = keypad_open, /* open */
34740 .release = keypad_release, /* close */
34741 diff -urNp linux-2.6.32.42/drivers/staging/phison/phison.c linux-2.6.32.42/drivers/staging/phison/phison.c
34742 --- linux-2.6.32.42/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
34743 +++ linux-2.6.32.42/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
34744 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_
34745 ATA_BMDMA_SHT(DRV_NAME),
34746 };
34747
34748 -static struct ata_port_operations phison_ops = {
34749 +static const struct ata_port_operations phison_ops = {
34750 .inherits = &ata_bmdma_port_ops,
34751 .prereset = phison_pre_reset,
34752 };
34753 diff -urNp linux-2.6.32.42/drivers/staging/poch/poch.c linux-2.6.32.42/drivers/staging/poch/poch.c
34754 --- linux-2.6.32.42/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
34755 +++ linux-2.6.32.42/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
34756 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
34757 return 0;
34758 }
34759
34760 -static struct file_operations poch_fops = {
34761 +static const struct file_operations poch_fops = {
34762 .owner = THIS_MODULE,
34763 .open = poch_open,
34764 .release = poch_release,
34765 diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/inode.c linux-2.6.32.42/drivers/staging/pohmelfs/inode.c
34766 --- linux-2.6.32.42/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
34767 +++ linux-2.6.32.42/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
34768 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
34769 mutex_init(&psb->mcache_lock);
34770 psb->mcache_root = RB_ROOT;
34771 psb->mcache_timeout = msecs_to_jiffies(5000);
34772 - atomic_long_set(&psb->mcache_gen, 0);
34773 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
34774
34775 psb->trans_max_pages = 100;
34776
34777 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
34778 INIT_LIST_HEAD(&psb->crypto_ready_list);
34779 INIT_LIST_HEAD(&psb->crypto_active_list);
34780
34781 - atomic_set(&psb->trans_gen, 1);
34782 + atomic_set_unchecked(&psb->trans_gen, 1);
34783 atomic_long_set(&psb->total_inodes, 0);
34784
34785 mutex_init(&psb->state_lock);
34786 diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c
34787 --- linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
34788 +++ linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
34789 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
34790 m->data = data;
34791 m->start = start;
34792 m->size = size;
34793 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
34794 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
34795
34796 mutex_lock(&psb->mcache_lock);
34797 err = pohmelfs_mcache_insert(psb, m);
34798 diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h
34799 --- linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
34800 +++ linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
34801 @@ -570,14 +570,14 @@ struct pohmelfs_config;
34802 struct pohmelfs_sb {
34803 struct rb_root mcache_root;
34804 struct mutex mcache_lock;
34805 - atomic_long_t mcache_gen;
34806 + atomic_long_unchecked_t mcache_gen;
34807 unsigned long mcache_timeout;
34808
34809 unsigned int idx;
34810
34811 unsigned int trans_retries;
34812
34813 - atomic_t trans_gen;
34814 + atomic_unchecked_t trans_gen;
34815
34816 unsigned int crypto_attached_size;
34817 unsigned int crypto_align_size;
34818 diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/trans.c linux-2.6.32.42/drivers/staging/pohmelfs/trans.c
34819 --- linux-2.6.32.42/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
34820 +++ linux-2.6.32.42/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
34821 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
34822 int err;
34823 struct netfs_cmd *cmd = t->iovec.iov_base;
34824
34825 - t->gen = atomic_inc_return(&psb->trans_gen);
34826 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
34827
34828 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
34829 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
34830 diff -urNp linux-2.6.32.42/drivers/staging/sep/sep_driver.c linux-2.6.32.42/drivers/staging/sep/sep_driver.c
34831 --- linux-2.6.32.42/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
34832 +++ linux-2.6.32.42/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
34833 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
34834 static dev_t sep_devno;
34835
34836 /* the files operations structure of the driver */
34837 -static struct file_operations sep_file_operations = {
34838 +static const struct file_operations sep_file_operations = {
34839 .owner = THIS_MODULE,
34840 .ioctl = sep_ioctl,
34841 .poll = sep_poll,
34842 diff -urNp linux-2.6.32.42/drivers/staging/usbip/vhci.h linux-2.6.32.42/drivers/staging/usbip/vhci.h
34843 --- linux-2.6.32.42/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
34844 +++ linux-2.6.32.42/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
34845 @@ -92,7 +92,7 @@ struct vhci_hcd {
34846 unsigned resuming:1;
34847 unsigned long re_timeout;
34848
34849 - atomic_t seqnum;
34850 + atomic_unchecked_t seqnum;
34851
34852 /*
34853 * NOTE:
34854 diff -urNp linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c
34855 --- linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
34856 +++ linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
34857 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
34858 return;
34859 }
34860
34861 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
34862 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34863 if (priv->seqnum == 0xffff)
34864 usbip_uinfo("seqnum max\n");
34865
34866 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
34867 return -ENOMEM;
34868 }
34869
34870 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
34871 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34872 if (unlink->seqnum == 0xffff)
34873 usbip_uinfo("seqnum max\n");
34874
34875 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
34876 vdev->rhport = rhport;
34877 }
34878
34879 - atomic_set(&vhci->seqnum, 0);
34880 + atomic_set_unchecked(&vhci->seqnum, 0);
34881 spin_lock_init(&vhci->lock);
34882
34883
34884 diff -urNp linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c
34885 --- linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
34886 +++ linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
34887 @@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
34888 usbip_uerr("cannot find a urb of seqnum %u\n",
34889 pdu->base.seqnum);
34890 usbip_uinfo("max seqnum %d\n",
34891 - atomic_read(&the_controller->seqnum));
34892 + atomic_read_unchecked(&the_controller->seqnum));
34893 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
34894 return;
34895 }
34896 diff -urNp linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c
34897 --- linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
34898 +++ linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
34899 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
34900 static int __init vme_user_probe(struct device *, int, int);
34901 static int __exit vme_user_remove(struct device *, int, int);
34902
34903 -static struct file_operations vme_user_fops = {
34904 +static const struct file_operations vme_user_fops = {
34905 .open = vme_user_open,
34906 .release = vme_user_release,
34907 .read = vme_user_read,
34908 diff -urNp linux-2.6.32.42/drivers/telephony/ixj.c linux-2.6.32.42/drivers/telephony/ixj.c
34909 --- linux-2.6.32.42/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
34910 +++ linux-2.6.32.42/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
34911 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
34912 bool mContinue;
34913 char *pIn, *pOut;
34914
34915 + pax_track_stack();
34916 +
34917 if (!SCI_Prepare(j))
34918 return 0;
34919
34920 diff -urNp linux-2.6.32.42/drivers/uio/uio.c linux-2.6.32.42/drivers/uio/uio.c
34921 --- linux-2.6.32.42/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
34922 +++ linux-2.6.32.42/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
34923 @@ -23,6 +23,7 @@
34924 #include <linux/string.h>
34925 #include <linux/kobject.h>
34926 #include <linux/uio_driver.h>
34927 +#include <asm/local.h>
34928
34929 #define UIO_MAX_DEVICES 255
34930
34931 @@ -30,10 +31,10 @@ struct uio_device {
34932 struct module *owner;
34933 struct device *dev;
34934 int minor;
34935 - atomic_t event;
34936 + atomic_unchecked_t event;
34937 struct fasync_struct *async_queue;
34938 wait_queue_head_t wait;
34939 - int vma_count;
34940 + local_t vma_count;
34941 struct uio_info *info;
34942 struct kobject *map_dir;
34943 struct kobject *portio_dir;
34944 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
34945 return entry->show(mem, buf);
34946 }
34947
34948 -static struct sysfs_ops map_sysfs_ops = {
34949 +static const struct sysfs_ops map_sysfs_ops = {
34950 .show = map_type_show,
34951 };
34952
34953 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
34954 return entry->show(port, buf);
34955 }
34956
34957 -static struct sysfs_ops portio_sysfs_ops = {
34958 +static const struct sysfs_ops portio_sysfs_ops = {
34959 .show = portio_type_show,
34960 };
34961
34962 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device
34963 struct uio_device *idev = dev_get_drvdata(dev);
34964 if (idev)
34965 return sprintf(buf, "%u\n",
34966 - (unsigned int)atomic_read(&idev->event));
34967 + (unsigned int)atomic_read_unchecked(&idev->event));
34968 else
34969 return -ENODEV;
34970 }
34971 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
34972 {
34973 struct uio_device *idev = info->uio_dev;
34974
34975 - atomic_inc(&idev->event);
34976 + atomic_inc_unchecked(&idev->event);
34977 wake_up_interruptible(&idev->wait);
34978 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
34979 }
34980 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
34981 }
34982
34983 listener->dev = idev;
34984 - listener->event_count = atomic_read(&idev->event);
34985 + listener->event_count = atomic_read_unchecked(&idev->event);
34986 filep->private_data = listener;
34987
34988 if (idev->info->open) {
34989 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
34990 return -EIO;
34991
34992 poll_wait(filep, &idev->wait, wait);
34993 - if (listener->event_count != atomic_read(&idev->event))
34994 + if (listener->event_count != atomic_read_unchecked(&idev->event))
34995 return POLLIN | POLLRDNORM;
34996 return 0;
34997 }
34998 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
34999 do {
35000 set_current_state(TASK_INTERRUPTIBLE);
35001
35002 - event_count = atomic_read(&idev->event);
35003 + event_count = atomic_read_unchecked(&idev->event);
35004 if (event_count != listener->event_count) {
35005 if (copy_to_user(buf, &event_count, count))
35006 retval = -EFAULT;
35007 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
35008 static void uio_vma_open(struct vm_area_struct *vma)
35009 {
35010 struct uio_device *idev = vma->vm_private_data;
35011 - idev->vma_count++;
35012 + local_inc(&idev->vma_count);
35013 }
35014
35015 static void uio_vma_close(struct vm_area_struct *vma)
35016 {
35017 struct uio_device *idev = vma->vm_private_data;
35018 - idev->vma_count--;
35019 + local_dec(&idev->vma_count);
35020 }
35021
35022 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35023 @@ -840,7 +841,7 @@ int __uio_register_device(struct module
35024 idev->owner = owner;
35025 idev->info = info;
35026 init_waitqueue_head(&idev->wait);
35027 - atomic_set(&idev->event, 0);
35028 + atomic_set_unchecked(&idev->event, 0);
35029
35030 ret = uio_get_minor(idev);
35031 if (ret)
35032 diff -urNp linux-2.6.32.42/drivers/usb/atm/usbatm.c linux-2.6.32.42/drivers/usb/atm/usbatm.c
35033 --- linux-2.6.32.42/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
35034 +++ linux-2.6.32.42/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
35035 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
35036 if (printk_ratelimit())
35037 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
35038 __func__, vpi, vci);
35039 - atomic_inc(&vcc->stats->rx_err);
35040 + atomic_inc_unchecked(&vcc->stats->rx_err);
35041 return;
35042 }
35043
35044 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
35045 if (length > ATM_MAX_AAL5_PDU) {
35046 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
35047 __func__, length, vcc);
35048 - atomic_inc(&vcc->stats->rx_err);
35049 + atomic_inc_unchecked(&vcc->stats->rx_err);
35050 goto out;
35051 }
35052
35053 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
35054 if (sarb->len < pdu_length) {
35055 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
35056 __func__, pdu_length, sarb->len, vcc);
35057 - atomic_inc(&vcc->stats->rx_err);
35058 + atomic_inc_unchecked(&vcc->stats->rx_err);
35059 goto out;
35060 }
35061
35062 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
35063 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
35064 __func__, vcc);
35065 - atomic_inc(&vcc->stats->rx_err);
35066 + atomic_inc_unchecked(&vcc->stats->rx_err);
35067 goto out;
35068 }
35069
35070 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
35071 if (printk_ratelimit())
35072 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
35073 __func__, length);
35074 - atomic_inc(&vcc->stats->rx_drop);
35075 + atomic_inc_unchecked(&vcc->stats->rx_drop);
35076 goto out;
35077 }
35078
35079 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
35080
35081 vcc->push(vcc, skb);
35082
35083 - atomic_inc(&vcc->stats->rx);
35084 + atomic_inc_unchecked(&vcc->stats->rx);
35085 out:
35086 skb_trim(sarb, 0);
35087 }
35088 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
35089 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
35090
35091 usbatm_pop(vcc, skb);
35092 - atomic_inc(&vcc->stats->tx);
35093 + atomic_inc_unchecked(&vcc->stats->tx);
35094
35095 skb = skb_dequeue(&instance->sndqueue);
35096 }
35097 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
35098 if (!left--)
35099 return sprintf(page,
35100 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
35101 - atomic_read(&atm_dev->stats.aal5.tx),
35102 - atomic_read(&atm_dev->stats.aal5.tx_err),
35103 - atomic_read(&atm_dev->stats.aal5.rx),
35104 - atomic_read(&atm_dev->stats.aal5.rx_err),
35105 - atomic_read(&atm_dev->stats.aal5.rx_drop));
35106 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
35107 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
35108 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
35109 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
35110 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
35111
35112 if (!left--) {
35113 if (instance->disconnected)
35114 diff -urNp linux-2.6.32.42/drivers/usb/class/cdc-wdm.c linux-2.6.32.42/drivers/usb/class/cdc-wdm.c
35115 --- linux-2.6.32.42/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
35116 +++ linux-2.6.32.42/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
35117 @@ -314,7 +314,7 @@ static ssize_t wdm_write
35118 if (r < 0)
35119 goto outnp;
35120
35121 - if (!file->f_flags && O_NONBLOCK)
35122 + if (!(file->f_flags & O_NONBLOCK))
35123 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
35124 &desc->flags));
35125 else
35126 diff -urNp linux-2.6.32.42/drivers/usb/core/hcd.c linux-2.6.32.42/drivers/usb/core/hcd.c
35127 --- linux-2.6.32.42/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
35128 +++ linux-2.6.32.42/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
35129 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
35130
35131 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35132
35133 -struct usb_mon_operations *mon_ops;
35134 +const struct usb_mon_operations *mon_ops;
35135
35136 /*
35137 * The registration is unlocked.
35138 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
35139 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
35140 */
35141
35142 -int usb_mon_register (struct usb_mon_operations *ops)
35143 +int usb_mon_register (const struct usb_mon_operations *ops)
35144 {
35145
35146 if (mon_ops)
35147 diff -urNp linux-2.6.32.42/drivers/usb/core/hcd.h linux-2.6.32.42/drivers/usb/core/hcd.h
35148 --- linux-2.6.32.42/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
35149 +++ linux-2.6.32.42/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
35150 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
35151 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35152
35153 struct usb_mon_operations {
35154 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
35155 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35156 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35157 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
35158 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35159 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35160 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
35161 };
35162
35163 -extern struct usb_mon_operations *mon_ops;
35164 +extern const struct usb_mon_operations *mon_ops;
35165
35166 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
35167 {
35168 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
35169 (*mon_ops->urb_complete)(bus, urb, status);
35170 }
35171
35172 -int usb_mon_register(struct usb_mon_operations *ops);
35173 +int usb_mon_register(const struct usb_mon_operations *ops);
35174 void usb_mon_deregister(void);
35175
35176 #else
35177 diff -urNp linux-2.6.32.42/drivers/usb/core/message.c linux-2.6.32.42/drivers/usb/core/message.c
35178 --- linux-2.6.32.42/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
35179 +++ linux-2.6.32.42/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
35180 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
35181 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
35182 if (buf) {
35183 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
35184 - if (len > 0) {
35185 - smallbuf = kmalloc(++len, GFP_NOIO);
35186 + if (len++ > 0) {
35187 + smallbuf = kmalloc(len, GFP_NOIO);
35188 if (!smallbuf)
35189 return buf;
35190 memcpy(smallbuf, buf, len);
35191 diff -urNp linux-2.6.32.42/drivers/usb/misc/appledisplay.c linux-2.6.32.42/drivers/usb/misc/appledisplay.c
35192 --- linux-2.6.32.42/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
35193 +++ linux-2.6.32.42/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
35194 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
35195 return pdata->msgdata[1];
35196 }
35197
35198 -static struct backlight_ops appledisplay_bl_data = {
35199 +static const struct backlight_ops appledisplay_bl_data = {
35200 .get_brightness = appledisplay_bl_get_brightness,
35201 .update_status = appledisplay_bl_update_status,
35202 };
35203 diff -urNp linux-2.6.32.42/drivers/usb/mon/mon_main.c linux-2.6.32.42/drivers/usb/mon/mon_main.c
35204 --- linux-2.6.32.42/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
35205 +++ linux-2.6.32.42/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
35206 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
35207 /*
35208 * Ops
35209 */
35210 -static struct usb_mon_operations mon_ops_0 = {
35211 +static const struct usb_mon_operations mon_ops_0 = {
35212 .urb_submit = mon_submit,
35213 .urb_submit_error = mon_submit_error,
35214 .urb_complete = mon_complete,
35215 diff -urNp linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h
35216 --- linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
35217 +++ linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
35218 @@ -192,7 +192,7 @@ struct wahc {
35219 struct list_head xfer_delayed_list;
35220 spinlock_t xfer_list_lock;
35221 struct work_struct xfer_work;
35222 - atomic_t xfer_id_count;
35223 + atomic_unchecked_t xfer_id_count;
35224 };
35225
35226
35227 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
35228 INIT_LIST_HEAD(&wa->xfer_delayed_list);
35229 spin_lock_init(&wa->xfer_list_lock);
35230 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
35231 - atomic_set(&wa->xfer_id_count, 1);
35232 + atomic_set_unchecked(&wa->xfer_id_count, 1);
35233 }
35234
35235 /**
35236 diff -urNp linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c
35237 --- linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
35238 +++ linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
35239 @@ -293,7 +293,7 @@ out:
35240 */
35241 static void wa_xfer_id_init(struct wa_xfer *xfer)
35242 {
35243 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
35244 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
35245 }
35246
35247 /*
35248 diff -urNp linux-2.6.32.42/drivers/uwb/wlp/messages.c linux-2.6.32.42/drivers/uwb/wlp/messages.c
35249 --- linux-2.6.32.42/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
35250 +++ linux-2.6.32.42/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
35251 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
35252 size_t len = skb->len;
35253 size_t used;
35254 ssize_t result;
35255 - struct wlp_nonce enonce, rnonce;
35256 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
35257 enum wlp_assc_error assc_err;
35258 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
35259 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
35260 diff -urNp linux-2.6.32.42/drivers/uwb/wlp/sysfs.c linux-2.6.32.42/drivers/uwb/wlp/sysfs.c
35261 --- linux-2.6.32.42/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
35262 +++ linux-2.6.32.42/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
35263 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
35264 return ret;
35265 }
35266
35267 -static
35268 -struct sysfs_ops wss_sysfs_ops = {
35269 +static const struct sysfs_ops wss_sysfs_ops = {
35270 .show = wlp_wss_attr_show,
35271 .store = wlp_wss_attr_store,
35272 };
35273 diff -urNp linux-2.6.32.42/drivers/video/atmel_lcdfb.c linux-2.6.32.42/drivers/video/atmel_lcdfb.c
35274 --- linux-2.6.32.42/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
35275 +++ linux-2.6.32.42/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
35276 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
35277 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
35278 }
35279
35280 -static struct backlight_ops atmel_lcdc_bl_ops = {
35281 +static const struct backlight_ops atmel_lcdc_bl_ops = {
35282 .update_status = atmel_bl_update_status,
35283 .get_brightness = atmel_bl_get_brightness,
35284 };
35285 diff -urNp linux-2.6.32.42/drivers/video/aty/aty128fb.c linux-2.6.32.42/drivers/video/aty/aty128fb.c
35286 --- linux-2.6.32.42/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
35287 +++ linux-2.6.32.42/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
35288 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
35289 return bd->props.brightness;
35290 }
35291
35292 -static struct backlight_ops aty128_bl_data = {
35293 +static const struct backlight_ops aty128_bl_data = {
35294 .get_brightness = aty128_bl_get_brightness,
35295 .update_status = aty128_bl_update_status,
35296 };
35297 diff -urNp linux-2.6.32.42/drivers/video/aty/atyfb_base.c linux-2.6.32.42/drivers/video/aty/atyfb_base.c
35298 --- linux-2.6.32.42/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
35299 +++ linux-2.6.32.42/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
35300 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
35301 return bd->props.brightness;
35302 }
35303
35304 -static struct backlight_ops aty_bl_data = {
35305 +static const struct backlight_ops aty_bl_data = {
35306 .get_brightness = aty_bl_get_brightness,
35307 .update_status = aty_bl_update_status,
35308 };
35309 diff -urNp linux-2.6.32.42/drivers/video/aty/radeon_backlight.c linux-2.6.32.42/drivers/video/aty/radeon_backlight.c
35310 --- linux-2.6.32.42/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
35311 +++ linux-2.6.32.42/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
35312 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
35313 return bd->props.brightness;
35314 }
35315
35316 -static struct backlight_ops radeon_bl_data = {
35317 +static const struct backlight_ops radeon_bl_data = {
35318 .get_brightness = radeon_bl_get_brightness,
35319 .update_status = radeon_bl_update_status,
35320 };
35321 diff -urNp linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c
35322 --- linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
35323 +++ linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
35324 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
35325 return error ? data->current_brightness : reg_val;
35326 }
35327
35328 -static struct backlight_ops adp5520_bl_ops = {
35329 +static const struct backlight_ops adp5520_bl_ops = {
35330 .update_status = adp5520_bl_update_status,
35331 .get_brightness = adp5520_bl_get_brightness,
35332 };
35333 diff -urNp linux-2.6.32.42/drivers/video/backlight/adx_bl.c linux-2.6.32.42/drivers/video/backlight/adx_bl.c
35334 --- linux-2.6.32.42/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
35335 +++ linux-2.6.32.42/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
35336 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
35337 return 1;
35338 }
35339
35340 -static struct backlight_ops adx_backlight_ops = {
35341 +static const struct backlight_ops adx_backlight_ops = {
35342 .options = 0,
35343 .update_status = adx_backlight_update_status,
35344 .get_brightness = adx_backlight_get_brightness,
35345 diff -urNp linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c
35346 --- linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
35347 +++ linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
35348 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
35349 return pwm_channel_enable(&pwmbl->pwmc);
35350 }
35351
35352 -static struct backlight_ops atmel_pwm_bl_ops = {
35353 +static const struct backlight_ops atmel_pwm_bl_ops = {
35354 .get_brightness = atmel_pwm_bl_get_intensity,
35355 .update_status = atmel_pwm_bl_set_intensity,
35356 };
35357 diff -urNp linux-2.6.32.42/drivers/video/backlight/backlight.c linux-2.6.32.42/drivers/video/backlight/backlight.c
35358 --- linux-2.6.32.42/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
35359 +++ linux-2.6.32.42/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
35360 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
35361 * ERR_PTR() or a pointer to the newly allocated device.
35362 */
35363 struct backlight_device *backlight_device_register(const char *name,
35364 - struct device *parent, void *devdata, struct backlight_ops *ops)
35365 + struct device *parent, void *devdata, const struct backlight_ops *ops)
35366 {
35367 struct backlight_device *new_bd;
35368 int rc;
35369 diff -urNp linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c
35370 --- linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
35371 +++ linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
35372 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
35373 }
35374 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
35375
35376 -static struct backlight_ops corgi_bl_ops = {
35377 +static const struct backlight_ops corgi_bl_ops = {
35378 .get_brightness = corgi_bl_get_intensity,
35379 .update_status = corgi_bl_update_status,
35380 };
35381 diff -urNp linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c
35382 --- linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
35383 +++ linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
35384 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
35385 return intensity;
35386 }
35387
35388 -static struct backlight_ops cr_backlight_ops = {
35389 +static const struct backlight_ops cr_backlight_ops = {
35390 .get_brightness = cr_backlight_get_intensity,
35391 .update_status = cr_backlight_set_intensity,
35392 };
35393 diff -urNp linux-2.6.32.42/drivers/video/backlight/da903x_bl.c linux-2.6.32.42/drivers/video/backlight/da903x_bl.c
35394 --- linux-2.6.32.42/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
35395 +++ linux-2.6.32.42/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
35396 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
35397 return data->current_brightness;
35398 }
35399
35400 -static struct backlight_ops da903x_backlight_ops = {
35401 +static const struct backlight_ops da903x_backlight_ops = {
35402 .update_status = da903x_backlight_update_status,
35403 .get_brightness = da903x_backlight_get_brightness,
35404 };
35405 diff -urNp linux-2.6.32.42/drivers/video/backlight/generic_bl.c linux-2.6.32.42/drivers/video/backlight/generic_bl.c
35406 --- linux-2.6.32.42/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
35407 +++ linux-2.6.32.42/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
35408 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
35409 }
35410 EXPORT_SYMBOL(corgibl_limit_intensity);
35411
35412 -static struct backlight_ops genericbl_ops = {
35413 +static const struct backlight_ops genericbl_ops = {
35414 .options = BL_CORE_SUSPENDRESUME,
35415 .get_brightness = genericbl_get_intensity,
35416 .update_status = genericbl_send_intensity,
35417 diff -urNp linux-2.6.32.42/drivers/video/backlight/hp680_bl.c linux-2.6.32.42/drivers/video/backlight/hp680_bl.c
35418 --- linux-2.6.32.42/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
35419 +++ linux-2.6.32.42/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
35420 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
35421 return current_intensity;
35422 }
35423
35424 -static struct backlight_ops hp680bl_ops = {
35425 +static const struct backlight_ops hp680bl_ops = {
35426 .get_brightness = hp680bl_get_intensity,
35427 .update_status = hp680bl_set_intensity,
35428 };
35429 diff -urNp linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c
35430 --- linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
35431 +++ linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
35432 @@ -93,7 +93,7 @@ out:
35433 return ret;
35434 }
35435
35436 -static struct backlight_ops jornada_bl_ops = {
35437 +static const struct backlight_ops jornada_bl_ops = {
35438 .get_brightness = jornada_bl_get_brightness,
35439 .update_status = jornada_bl_update_status,
35440 .options = BL_CORE_SUSPENDRESUME,
35441 diff -urNp linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c
35442 --- linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
35443 +++ linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
35444 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
35445 return kb3886bl_intensity;
35446 }
35447
35448 -static struct backlight_ops kb3886bl_ops = {
35449 +static const struct backlight_ops kb3886bl_ops = {
35450 .get_brightness = kb3886bl_get_intensity,
35451 .update_status = kb3886bl_send_intensity,
35452 };
35453 diff -urNp linux-2.6.32.42/drivers/video/backlight/locomolcd.c linux-2.6.32.42/drivers/video/backlight/locomolcd.c
35454 --- linux-2.6.32.42/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
35455 +++ linux-2.6.32.42/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
35456 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
35457 return current_intensity;
35458 }
35459
35460 -static struct backlight_ops locomobl_data = {
35461 +static const struct backlight_ops locomobl_data = {
35462 .get_brightness = locomolcd_get_intensity,
35463 .update_status = locomolcd_set_intensity,
35464 };
35465 diff -urNp linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c
35466 --- linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
35467 +++ linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
35468 @@ -33,7 +33,7 @@ struct dmi_match_data {
35469 unsigned long iostart;
35470 unsigned long iolen;
35471 /* Backlight operations structure. */
35472 - struct backlight_ops backlight_ops;
35473 + const struct backlight_ops backlight_ops;
35474 };
35475
35476 /* Module parameters. */
35477 diff -urNp linux-2.6.32.42/drivers/video/backlight/omap1_bl.c linux-2.6.32.42/drivers/video/backlight/omap1_bl.c
35478 --- linux-2.6.32.42/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
35479 +++ linux-2.6.32.42/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
35480 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
35481 return bl->current_intensity;
35482 }
35483
35484 -static struct backlight_ops omapbl_ops = {
35485 +static const struct backlight_ops omapbl_ops = {
35486 .get_brightness = omapbl_get_intensity,
35487 .update_status = omapbl_update_status,
35488 };
35489 diff -urNp linux-2.6.32.42/drivers/video/backlight/progear_bl.c linux-2.6.32.42/drivers/video/backlight/progear_bl.c
35490 --- linux-2.6.32.42/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
35491 +++ linux-2.6.32.42/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
35492 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
35493 return intensity - HW_LEVEL_MIN;
35494 }
35495
35496 -static struct backlight_ops progearbl_ops = {
35497 +static const struct backlight_ops progearbl_ops = {
35498 .get_brightness = progearbl_get_intensity,
35499 .update_status = progearbl_set_intensity,
35500 };
35501 diff -urNp linux-2.6.32.42/drivers/video/backlight/pwm_bl.c linux-2.6.32.42/drivers/video/backlight/pwm_bl.c
35502 --- linux-2.6.32.42/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
35503 +++ linux-2.6.32.42/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
35504 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
35505 return bl->props.brightness;
35506 }
35507
35508 -static struct backlight_ops pwm_backlight_ops = {
35509 +static const struct backlight_ops pwm_backlight_ops = {
35510 .update_status = pwm_backlight_update_status,
35511 .get_brightness = pwm_backlight_get_brightness,
35512 };
35513 diff -urNp linux-2.6.32.42/drivers/video/backlight/tosa_bl.c linux-2.6.32.42/drivers/video/backlight/tosa_bl.c
35514 --- linux-2.6.32.42/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
35515 +++ linux-2.6.32.42/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
35516 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
35517 return props->brightness;
35518 }
35519
35520 -static struct backlight_ops bl_ops = {
35521 +static const struct backlight_ops bl_ops = {
35522 .get_brightness = tosa_bl_get_brightness,
35523 .update_status = tosa_bl_update_status,
35524 };
35525 diff -urNp linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c
35526 --- linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
35527 +++ linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
35528 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
35529 return data->current_brightness;
35530 }
35531
35532 -static struct backlight_ops wm831x_backlight_ops = {
35533 +static const struct backlight_ops wm831x_backlight_ops = {
35534 .options = BL_CORE_SUSPENDRESUME,
35535 .update_status = wm831x_backlight_update_status,
35536 .get_brightness = wm831x_backlight_get_brightness,
35537 diff -urNp linux-2.6.32.42/drivers/video/bf54x-lq043fb.c linux-2.6.32.42/drivers/video/bf54x-lq043fb.c
35538 --- linux-2.6.32.42/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
35539 +++ linux-2.6.32.42/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
35540 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
35541 return 0;
35542 }
35543
35544 -static struct backlight_ops bfin_lq043fb_bl_ops = {
35545 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
35546 .get_brightness = bl_get_brightness,
35547 };
35548
35549 diff -urNp linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c
35550 --- linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
35551 +++ linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
35552 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
35553 return 0;
35554 }
35555
35556 -static struct backlight_ops bfin_lq043fb_bl_ops = {
35557 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
35558 .get_brightness = bl_get_brightness,
35559 };
35560
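The run of hunks above makes one recurring change: each driver-local backlight_ops table is declared const, and backlight_device_register() in backlight.c is changed to accept a const pointer, so the tables can sit in read-only memory instead of remaining writable function-pointer targets. A minimal sketch of the resulting driver-side pattern, using only the fields that appear in the hunks (.options, .get_brightness, .update_status); the example_* names are invented for illustration and are not part of the patch:

#include <linux/backlight.h>

static int example_bl_get_intensity(struct backlight_device *bd)
{
	return bd->props.brightness;
}

static int example_bl_update_status(struct backlight_device *bd)
{
	/* push bd->props.brightness to the hardware here */
	return 0;
}

/* const: the table can live in .rodata, which is why backlight.c must now
 * take a const struct backlight_ops * in backlight_device_register(). */
static const struct backlight_ops example_bl_ops = {
	.options	= BL_CORE_SUSPENDRESUME,
	.get_brightness	= example_bl_get_intensity,
	.update_status	= example_bl_update_status,
};

The mbp_nvidia_bl.c hunk is the one outlier: there the ops table is embedded in a DMI match structure, so the member itself becomes const rather than a standalone static table.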
35561 diff -urNp linux-2.6.32.42/drivers/video/fbcmap.c linux-2.6.32.42/drivers/video/fbcmap.c
35562 --- linux-2.6.32.42/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
35563 +++ linux-2.6.32.42/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
35564 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
35565 rc = -ENODEV;
35566 goto out;
35567 }
35568 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
35569 - !info->fbops->fb_setcmap)) {
35570 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
35571 rc = -EINVAL;
35572 goto out1;
35573 }
35574 diff -urNp linux-2.6.32.42/drivers/video/fbmem.c linux-2.6.32.42/drivers/video/fbmem.c
35575 --- linux-2.6.32.42/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
35576 +++ linux-2.6.32.42/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
35577 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
35578 image->dx += image->width + 8;
35579 }
35580 } else if (rotate == FB_ROTATE_UD) {
35581 - for (x = 0; x < num && image->dx >= 0; x++) {
35582 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
35583 info->fbops->fb_imageblit(info, image);
35584 image->dx -= image->width + 8;
35585 }
35586 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
35587 image->dy += image->height + 8;
35588 }
35589 } else if (rotate == FB_ROTATE_CCW) {
35590 - for (x = 0; x < num && image->dy >= 0; x++) {
35591 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
35592 info->fbops->fb_imageblit(info, image);
35593 image->dy -= image->height + 8;
35594 }
35595 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
35596 int flags = info->flags;
35597 int ret = 0;
35598
35599 + pax_track_stack();
35600 +
35601 if (var->activate & FB_ACTIVATE_INV_MODE) {
35602 struct fb_videomode mode1, mode2;
35603
35604 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
35605 void __user *argp = (void __user *)arg;
35606 long ret = 0;
35607
35608 + pax_track_stack();
35609 +
35610 switch (cmd) {
35611 case FBIOGET_VSCREENINFO:
35612 if (!lock_fb_info(info))
35613 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
35614 return -EFAULT;
35615 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
35616 return -EINVAL;
35617 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
35618 + if (con2fb.framebuffer >= FB_MAX)
35619 return -EINVAL;
35620 if (!registered_fb[con2fb.framebuffer])
35621 request_module("fb%d", con2fb.framebuffer);
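The fbcmap.c and fbmem.c hunks above deal with comparisons on unsigned values: cmap->start and con2fb.framebuffer can never be negative, so the "< 0" tests are dropped, while the rotated-logo loops in fb_do_show_logo() only terminate if the wrapped image->dx / image->dy is first cast back to a signed 32-bit type. A standalone userspace illustration of that second point (the variables are invented; this is not patch code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dx = 5;		/* mimics the unsigned image->dx   */
	unsigned int width = 8;

	/* "dx >= 0" would be true forever for an unsigned dx; the cast turns
	 * the wrapped value into a negative number and ends the loop after
	 * one pass, which is what the fbmem.c hunks rely on. */
	for (int x = 0; x < 100 && (int32_t)dx >= 0; x++) {
		printf("blit at dx=%u\n", (unsigned int)dx);
		dx -= width + 8;	/* wraps past zero immediately */
	}
	return 0;
}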
35622 diff -urNp linux-2.6.32.42/drivers/video/i810/i810_accel.c linux-2.6.32.42/drivers/video/i810/i810_accel.c
35623 --- linux-2.6.32.42/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
35624 +++ linux-2.6.32.42/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
35625 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
35626 }
35627 }
35628 printk("ringbuffer lockup!!!\n");
35629 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
35630 i810_report_error(mmio);
35631 par->dev_flags |= LOCKUP;
35632 info->pixmap.scan_align = 1;
35633 diff -urNp linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c
35634 --- linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
35635 +++ linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
35636 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
35637 return bd->props.brightness;
35638 }
35639
35640 -static struct backlight_ops nvidia_bl_ops = {
35641 +static const struct backlight_ops nvidia_bl_ops = {
35642 .get_brightness = nvidia_bl_get_brightness,
35643 .update_status = nvidia_bl_update_status,
35644 };
35645 diff -urNp linux-2.6.32.42/drivers/video/riva/fbdev.c linux-2.6.32.42/drivers/video/riva/fbdev.c
35646 --- linux-2.6.32.42/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
35647 +++ linux-2.6.32.42/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
35648 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
35649 return bd->props.brightness;
35650 }
35651
35652 -static struct backlight_ops riva_bl_ops = {
35653 +static const struct backlight_ops riva_bl_ops = {
35654 .get_brightness = riva_bl_get_brightness,
35655 .update_status = riva_bl_update_status,
35656 };
35657 diff -urNp linux-2.6.32.42/drivers/video/uvesafb.c linux-2.6.32.42/drivers/video/uvesafb.c
35658 --- linux-2.6.32.42/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
35659 +++ linux-2.6.32.42/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
35660 @@ -18,6 +18,7 @@
35661 #include <linux/fb.h>
35662 #include <linux/io.h>
35663 #include <linux/mutex.h>
35664 +#include <linux/moduleloader.h>
35665 #include <video/edid.h>
35666 #include <video/uvesafb.h>
35667 #ifdef CONFIG_X86
35668 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
35669 NULL,
35670 };
35671
35672 - return call_usermodehelper(v86d_path, argv, envp, 1);
35673 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
35674 }
35675
35676 /*
35677 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
35678 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
35679 par->pmi_setpal = par->ypan = 0;
35680 } else {
35681 +
35682 +#ifdef CONFIG_PAX_KERNEXEC
35683 +#ifdef CONFIG_MODULES
35684 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
35685 +#endif
35686 + if (!par->pmi_code) {
35687 + par->pmi_setpal = par->ypan = 0;
35688 + return 0;
35689 + }
35690 +#endif
35691 +
35692 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
35693 + task->t.regs.edi);
35694 +
35695 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35696 + pax_open_kernel();
35697 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
35698 + pax_close_kernel();
35699 +
35700 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
35701 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
35702 +#else
35703 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
35704 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
35705 +#endif
35706 +
35707 printk(KERN_INFO "uvesafb: protected mode interface info at "
35708 "%04x:%04x\n",
35709 (u16)task->t.regs.es, (u16)task->t.regs.edi);
35710 @@ -1799,6 +1822,11 @@ out:
35711 if (par->vbe_modes)
35712 kfree(par->vbe_modes);
35713
35714 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35715 + if (par->pmi_code)
35716 + module_free_exec(NULL, par->pmi_code);
35717 +#endif
35718 +
35719 framebuffer_release(info);
35720 return err;
35721 }
35722 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
35723 kfree(par->vbe_state_orig);
35724 if (par->vbe_state_saved)
35725 kfree(par->vbe_state_saved);
35726 +
35727 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35728 + if (par->pmi_code)
35729 + module_free_exec(NULL, par->pmi_code);
35730 +#endif
35731 +
35732 }
35733
35734 framebuffer_release(info);
35735 diff -urNp linux-2.6.32.42/drivers/video/vesafb.c linux-2.6.32.42/drivers/video/vesafb.c
35736 --- linux-2.6.32.42/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
35737 +++ linux-2.6.32.42/drivers/video/vesafb.c 2011-04-17 15:56:46.000000000 -0400
35738 @@ -9,6 +9,7 @@
35739 */
35740
35741 #include <linux/module.h>
35742 +#include <linux/moduleloader.h>
35743 #include <linux/kernel.h>
35744 #include <linux/errno.h>
35745 #include <linux/string.h>
35746 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
35747 static int vram_total __initdata; /* Set total amount of memory */
35748 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
35749 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
35750 -static void (*pmi_start)(void) __read_mostly;
35751 -static void (*pmi_pal) (void) __read_mostly;
35752 +static void (*pmi_start)(void) __read_only;
35753 +static void (*pmi_pal) (void) __read_only;
35754 static int depth __read_mostly;
35755 static int vga_compat __read_mostly;
35756 /* --------------------------------------------------------------------- */
35757 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
35758 unsigned int size_vmode;
35759 unsigned int size_remap;
35760 unsigned int size_total;
35761 + void *pmi_code = NULL;
35762
35763 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
35764 return -ENODEV;
35765 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
35766 size_remap = size_total;
35767 vesafb_fix.smem_len = size_remap;
35768
35769 -#ifndef __i386__
35770 - screen_info.vesapm_seg = 0;
35771 -#endif
35772 -
35773 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
35774 printk(KERN_WARNING
35775 "vesafb: cannot reserve video memory at 0x%lx\n",
35776 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
35777 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
35778 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
35779
35780 +#ifdef __i386__
35781 +
35782 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35783 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
35784 + if (!pmi_code)
35785 +#elif !defined(CONFIG_PAX_KERNEXEC)
35786 + if (0)
35787 +#endif
35788 +
35789 +#endif
35790 + screen_info.vesapm_seg = 0;
35791 +
35792 if (screen_info.vesapm_seg) {
35793 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
35794 - screen_info.vesapm_seg,screen_info.vesapm_off);
35795 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
35796 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
35797 }
35798
35799 if (screen_info.vesapm_seg < 0xc000)
35800 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
35801
35802 if (ypan || pmi_setpal) {
35803 unsigned short *pmi_base;
35804 - pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
35805 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
35806 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
35807 +
35808 + pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
35809 +
35810 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35811 + pax_open_kernel();
35812 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
35813 +#else
35814 + pmi_code = pmi_base;
35815 +#endif
35816 +
35817 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
35818 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
35819 +
35820 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35821 + pmi_start = ktva_ktla(pmi_start);
35822 + pmi_pal = ktva_ktla(pmi_pal);
35823 + pax_close_kernel();
35824 +#endif
35825 +
35826 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
35827 if (pmi_base[3]) {
35828 printk(KERN_INFO "vesafb: pmi: ports = ");
35829 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
35830 info->node, info->fix.id);
35831 return 0;
35832 err:
35833 +
35834 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35835 + module_free_exec(NULL, pmi_code);
35836 +#endif
35837 +
35838 if (info->screen_base)
35839 iounmap(info->screen_base);
35840 framebuffer_release(info);
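Both the uvesafb.c and vesafb.c changes above follow the same CONFIG_PAX_KERNEXEC pattern: rather than executing the BIOS protected-mode interface in place at its phys_to_virt() mapping, the code is copied into an executable allocation and the two entry points are recomputed relative to the copy. A condensed sketch of that sequence, assuming the PaX helpers (module_alloc_exec(), pax_open_kernel()/pax_close_kernel(), ktva_ktla()) behave as they are used in the hunks; the pmi_* names here are placeholders and the fragment is not buildable on its own:

	u16 *pmi_base = (u16 *)phys_to_virt(((u32)pmi_seg << 4) + pmi_off);
	void *pmi_code = module_alloc_exec(pmi_size);

	if (pmi_code) {
		pax_open_kernel();
		memcpy(pmi_code, pmi_base, pmi_size);	/* executable copy */
		pax_close_kernel();

		/* pmi_base[1]/[2] hold the offsets of the two entry points */
		pmi_start = ktva_ktla(pmi_code + pmi_base[1]);
		pmi_pal   = ktva_ktla(pmi_code + pmi_base[2]);
	}

On the error and remove paths both drivers release the copy again with module_free_exec(NULL, ...), which is why the probe/remove hunks add the matching frees.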
35841 diff -urNp linux-2.6.32.42/drivers/xen/sys-hypervisor.c linux-2.6.32.42/drivers/xen/sys-hypervisor.c
35842 --- linux-2.6.32.42/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
35843 +++ linux-2.6.32.42/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
35844 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
35845 return 0;
35846 }
35847
35848 -static struct sysfs_ops hyp_sysfs_ops = {
35849 +static const struct sysfs_ops hyp_sysfs_ops = {
35850 .show = hyp_sysfs_show,
35851 .store = hyp_sysfs_store,
35852 };
35853 diff -urNp linux-2.6.32.42/fs/9p/vfs_inode.c linux-2.6.32.42/fs/9p/vfs_inode.c
35854 --- linux-2.6.32.42/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
35855 +++ linux-2.6.32.42/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
35856 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
35857 static void
35858 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
35859 {
35860 - char *s = nd_get_link(nd);
35861 + const char *s = nd_get_link(nd);
35862
35863 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
35864 IS_ERR(s) ? "<error>" : s);
35865 diff -urNp linux-2.6.32.42/fs/aio.c linux-2.6.32.42/fs/aio.c
35866 --- linux-2.6.32.42/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
35867 +++ linux-2.6.32.42/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
35868 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
35869 size += sizeof(struct io_event) * nr_events;
35870 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
35871
35872 - if (nr_pages < 0)
35873 + if (nr_pages <= 0)
35874 return -EINVAL;
35875
35876 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
35877 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
35878 struct aio_timeout to;
35879 int retry = 0;
35880
35881 + pax_track_stack();
35882 +
35883 /* needed to zero any padding within an entry (there shouldn't be
35884 * any, but C is fun!
35885 */
35886 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
35887 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
35888 {
35889 ssize_t ret;
35890 + struct iovec iovstack;
35891
35892 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
35893 kiocb->ki_nbytes, 1,
35894 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
35895 + &iovstack, &kiocb->ki_iovec);
35896 if (ret < 0)
35897 goto out;
35898
35899 + if (kiocb->ki_iovec == &iovstack) {
35900 + kiocb->ki_inline_vec = iovstack;
35901 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
35902 + }
35903 kiocb->ki_nr_segs = kiocb->ki_nbytes;
35904 kiocb->ki_cur_seg = 0;
35905 /* ki_nbytes/left now reflect bytes instead of segs */
35906 diff -urNp linux-2.6.32.42/fs/attr.c linux-2.6.32.42/fs/attr.c
35907 --- linux-2.6.32.42/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
35908 +++ linux-2.6.32.42/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
35909 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
35910 unsigned long limit;
35911
35912 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
35913 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
35914 if (limit != RLIM_INFINITY && offset > limit)
35915 goto out_sig;
35916 if (offset > inode->i_sb->s_maxbytes)
35917 diff -urNp linux-2.6.32.42/fs/autofs/root.c linux-2.6.32.42/fs/autofs/root.c
35918 --- linux-2.6.32.42/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
35919 +++ linux-2.6.32.42/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
35920 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
35921 set_bit(n,sbi->symlink_bitmap);
35922 sl = &sbi->symlink[n];
35923 sl->len = strlen(symname);
35924 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
35925 + slsize = sl->len+1;
35926 + sl->data = kmalloc(slsize, GFP_KERNEL);
35927 if (!sl->data) {
35928 clear_bit(n,sbi->symlink_bitmap);
35929 unlock_kernel();
35930 diff -urNp linux-2.6.32.42/fs/autofs4/symlink.c linux-2.6.32.42/fs/autofs4/symlink.c
35931 --- linux-2.6.32.42/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
35932 +++ linux-2.6.32.42/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
35933 @@ -15,7 +15,7 @@
35934 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
35935 {
35936 struct autofs_info *ino = autofs4_dentry_ino(dentry);
35937 - nd_set_link(nd, (char *)ino->u.symlink);
35938 + nd_set_link(nd, ino->u.symlink);
35939 return NULL;
35940 }
35941
35942 diff -urNp linux-2.6.32.42/fs/befs/linuxvfs.c linux-2.6.32.42/fs/befs/linuxvfs.c
35943 --- linux-2.6.32.42/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
35944 +++ linux-2.6.32.42/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
35945 @@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
35946 {
35947 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
35948 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
35949 - char *link = nd_get_link(nd);
35950 + const char *link = nd_get_link(nd);
35951 if (!IS_ERR(link))
35952 kfree(link);
35953 }
35954 diff -urNp linux-2.6.32.42/fs/binfmt_aout.c linux-2.6.32.42/fs/binfmt_aout.c
35955 --- linux-2.6.32.42/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
35956 +++ linux-2.6.32.42/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
35957 @@ -16,6 +16,7 @@
35958 #include <linux/string.h>
35959 #include <linux/fs.h>
35960 #include <linux/file.h>
35961 +#include <linux/security.h>
35962 #include <linux/stat.h>
35963 #include <linux/fcntl.h>
35964 #include <linux/ptrace.h>
35965 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
35966 #endif
35967 # define START_STACK(u) (u.start_stack)
35968
35969 + memset(&dump, 0, sizeof(dump));
35970 +
35971 fs = get_fs();
35972 set_fs(KERNEL_DS);
35973 has_dumped = 1;
35974 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
35975
35976 /* If the size of the dump file exceeds the rlimit, then see what would happen
35977 if we wrote the stack, but not the data area. */
35978 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
35979 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
35980 dump.u_dsize = 0;
35981
35982 /* Make sure we have enough room to write the stack and data areas. */
35983 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
35984 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
35985 dump.u_ssize = 0;
35986
35987 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
35988 dump_size = dump.u_ssize << PAGE_SHIFT;
35989 DUMP_WRITE(dump_start,dump_size);
35990 }
35991 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
35992 - set_fs(KERNEL_DS);
35993 - DUMP_WRITE(current,sizeof(*current));
35994 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
35995 end_coredump:
35996 set_fs(fs);
35997 return has_dumped;
35998 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
35999 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
36000 if (rlim >= RLIM_INFINITY)
36001 rlim = ~0;
36002 +
36003 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
36004 if (ex.a_data + ex.a_bss > rlim)
36005 return -ENOMEM;
36006
36007 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
36008 install_exec_creds(bprm);
36009 current->flags &= ~PF_FORKNOEXEC;
36010
36011 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36012 + current->mm->pax_flags = 0UL;
36013 +#endif
36014 +
36015 +#ifdef CONFIG_PAX_PAGEEXEC
36016 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
36017 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
36018 +
36019 +#ifdef CONFIG_PAX_EMUTRAMP
36020 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
36021 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
36022 +#endif
36023 +
36024 +#ifdef CONFIG_PAX_MPROTECT
36025 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
36026 + current->mm->pax_flags |= MF_PAX_MPROTECT;
36027 +#endif
36028 +
36029 + }
36030 +#endif
36031 +
36032 if (N_MAGIC(ex) == OMAGIC) {
36033 unsigned long text_addr, map_size;
36034 loff_t pos;
36035 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
36036
36037 down_write(&current->mm->mmap_sem);
36038 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
36039 - PROT_READ | PROT_WRITE | PROT_EXEC,
36040 + PROT_READ | PROT_WRITE,
36041 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
36042 fd_offset + ex.a_text);
36043 up_write(&current->mm->mmap_sem);
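The memset(&dump, 0, sizeof(dump)) added to aout_core_dump() above is a hardening fix: the dump header is filled in field by field and then written out as a whole, so any compiler-inserted padding (and any field left unset) would otherwise carry stale kernel stack bytes into the core file. A standalone userspace illustration of the padding effect (the struct and names are invented for the example):

#include <stdio.h>
#include <string.h>

struct dump_hdr {
	char magic;	/* the compiler typically pads after this ... */
	long start;	/* ... so that 'start' is naturally aligned    */
};

int main(void)
{
	struct dump_hdr d;

	memset(&d, 0, sizeof(d));	/* without this, the padding bytes keep
					   whatever was on the stack and are
					   written out verbatim below */
	d.magic = 1;
	d.start = 0x1000;

	fwrite(&d, sizeof(d), 1, stdout);
	return 0;
}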
36044 diff -urNp linux-2.6.32.42/fs/binfmt_elf.c linux-2.6.32.42/fs/binfmt_elf.c
36045 --- linux-2.6.32.42/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
36046 +++ linux-2.6.32.42/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
36047 @@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
36048 #define elf_core_dump NULL
36049 #endif
36050
36051 +#ifdef CONFIG_PAX_MPROTECT
36052 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
36053 +#endif
36054 +
36055 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
36056 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
36057 #else
36058 @@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
36059 .load_binary = load_elf_binary,
36060 .load_shlib = load_elf_library,
36061 .core_dump = elf_core_dump,
36062 +
36063 +#ifdef CONFIG_PAX_MPROTECT
36064 + .handle_mprotect= elf_handle_mprotect,
36065 +#endif
36066 +
36067 .min_coredump = ELF_EXEC_PAGESIZE,
36068 .hasvdso = 1
36069 };
36070 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
36071
36072 static int set_brk(unsigned long start, unsigned long end)
36073 {
36074 + unsigned long e = end;
36075 +
36076 start = ELF_PAGEALIGN(start);
36077 end = ELF_PAGEALIGN(end);
36078 if (end > start) {
36079 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
36080 if (BAD_ADDR(addr))
36081 return addr;
36082 }
36083 - current->mm->start_brk = current->mm->brk = end;
36084 + current->mm->start_brk = current->mm->brk = e;
36085 return 0;
36086 }
36087
36088 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
36089 elf_addr_t __user *u_rand_bytes;
36090 const char *k_platform = ELF_PLATFORM;
36091 const char *k_base_platform = ELF_BASE_PLATFORM;
36092 - unsigned char k_rand_bytes[16];
36093 + u32 k_rand_bytes[4];
36094 int items;
36095 elf_addr_t *elf_info;
36096 int ei_index = 0;
36097 const struct cred *cred = current_cred();
36098 struct vm_area_struct *vma;
36099 + unsigned long saved_auxv[AT_VECTOR_SIZE];
36100 +
36101 + pax_track_stack();
36102
36103 /*
36104 * In some cases (e.g. Hyper-Threading), we want to avoid L1
36105 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
36106 * Generate 16 random bytes for userspace PRNG seeding.
36107 */
36108 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
36109 - u_rand_bytes = (elf_addr_t __user *)
36110 - STACK_ALLOC(p, sizeof(k_rand_bytes));
36111 + srandom32(k_rand_bytes[0] ^ random32());
36112 + srandom32(k_rand_bytes[1] ^ random32());
36113 + srandom32(k_rand_bytes[2] ^ random32());
36114 + srandom32(k_rand_bytes[3] ^ random32());
36115 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
36116 + u_rand_bytes = (elf_addr_t __user *) p;
36117 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
36118 return -EFAULT;
36119
36120 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
36121 return -EFAULT;
36122 current->mm->env_end = p;
36123
36124 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
36125 +
36126 /* Put the elf_info on the stack in the right place. */
36127 sp = (elf_addr_t __user *)envp + 1;
36128 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
36129 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
36130 return -EFAULT;
36131 return 0;
36132 }
36133 @@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
36134 {
36135 struct elf_phdr *elf_phdata;
36136 struct elf_phdr *eppnt;
36137 - unsigned long load_addr = 0;
36138 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
36139 int load_addr_set = 0;
36140 unsigned long last_bss = 0, elf_bss = 0;
36141 - unsigned long error = ~0UL;
36142 + unsigned long error = -EINVAL;
36143 unsigned long total_size;
36144 int retval, i, size;
36145
36146 @@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
36147 goto out_close;
36148 }
36149
36150 +#ifdef CONFIG_PAX_SEGMEXEC
36151 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
36152 + pax_task_size = SEGMEXEC_TASK_SIZE;
36153 +#endif
36154 +
36155 eppnt = elf_phdata;
36156 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
36157 if (eppnt->p_type == PT_LOAD) {
36158 @@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
36159 k = load_addr + eppnt->p_vaddr;
36160 if (BAD_ADDR(k) ||
36161 eppnt->p_filesz > eppnt->p_memsz ||
36162 - eppnt->p_memsz > TASK_SIZE ||
36163 - TASK_SIZE - eppnt->p_memsz < k) {
36164 + eppnt->p_memsz > pax_task_size ||
36165 + pax_task_size - eppnt->p_memsz < k) {
36166 error = -ENOMEM;
36167 goto out_close;
36168 }
36169 @@ -532,6 +557,194 @@ out:
36170 return error;
36171 }
36172
36173 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
36174 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
36175 +{
36176 + unsigned long pax_flags = 0UL;
36177 +
36178 +#ifdef CONFIG_PAX_PAGEEXEC
36179 + if (elf_phdata->p_flags & PF_PAGEEXEC)
36180 + pax_flags |= MF_PAX_PAGEEXEC;
36181 +#endif
36182 +
36183 +#ifdef CONFIG_PAX_SEGMEXEC
36184 + if (elf_phdata->p_flags & PF_SEGMEXEC)
36185 + pax_flags |= MF_PAX_SEGMEXEC;
36186 +#endif
36187 +
36188 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36189 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36190 + if (nx_enabled)
36191 + pax_flags &= ~MF_PAX_SEGMEXEC;
36192 + else
36193 + pax_flags &= ~MF_PAX_PAGEEXEC;
36194 + }
36195 +#endif
36196 +
36197 +#ifdef CONFIG_PAX_EMUTRAMP
36198 + if (elf_phdata->p_flags & PF_EMUTRAMP)
36199 + pax_flags |= MF_PAX_EMUTRAMP;
36200 +#endif
36201 +
36202 +#ifdef CONFIG_PAX_MPROTECT
36203 + if (elf_phdata->p_flags & PF_MPROTECT)
36204 + pax_flags |= MF_PAX_MPROTECT;
36205 +#endif
36206 +
36207 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36208 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
36209 + pax_flags |= MF_PAX_RANDMMAP;
36210 +#endif
36211 +
36212 + return pax_flags;
36213 +}
36214 +#endif
36215 +
36216 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36217 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
36218 +{
36219 + unsigned long pax_flags = 0UL;
36220 +
36221 +#ifdef CONFIG_PAX_PAGEEXEC
36222 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
36223 + pax_flags |= MF_PAX_PAGEEXEC;
36224 +#endif
36225 +
36226 +#ifdef CONFIG_PAX_SEGMEXEC
36227 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
36228 + pax_flags |= MF_PAX_SEGMEXEC;
36229 +#endif
36230 +
36231 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36232 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36233 + if (nx_enabled)
36234 + pax_flags &= ~MF_PAX_SEGMEXEC;
36235 + else
36236 + pax_flags &= ~MF_PAX_PAGEEXEC;
36237 + }
36238 +#endif
36239 +
36240 +#ifdef CONFIG_PAX_EMUTRAMP
36241 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
36242 + pax_flags |= MF_PAX_EMUTRAMP;
36243 +#endif
36244 +
36245 +#ifdef CONFIG_PAX_MPROTECT
36246 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
36247 + pax_flags |= MF_PAX_MPROTECT;
36248 +#endif
36249 +
36250 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36251 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
36252 + pax_flags |= MF_PAX_RANDMMAP;
36253 +#endif
36254 +
36255 + return pax_flags;
36256 +}
36257 +#endif
36258 +
36259 +#ifdef CONFIG_PAX_EI_PAX
36260 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
36261 +{
36262 + unsigned long pax_flags = 0UL;
36263 +
36264 +#ifdef CONFIG_PAX_PAGEEXEC
36265 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
36266 + pax_flags |= MF_PAX_PAGEEXEC;
36267 +#endif
36268 +
36269 +#ifdef CONFIG_PAX_SEGMEXEC
36270 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
36271 + pax_flags |= MF_PAX_SEGMEXEC;
36272 +#endif
36273 +
36274 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36275 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36276 + if (nx_enabled)
36277 + pax_flags &= ~MF_PAX_SEGMEXEC;
36278 + else
36279 + pax_flags &= ~MF_PAX_PAGEEXEC;
36280 + }
36281 +#endif
36282 +
36283 +#ifdef CONFIG_PAX_EMUTRAMP
36284 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
36285 + pax_flags |= MF_PAX_EMUTRAMP;
36286 +#endif
36287 +
36288 +#ifdef CONFIG_PAX_MPROTECT
36289 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
36290 + pax_flags |= MF_PAX_MPROTECT;
36291 +#endif
36292 +
36293 +#ifdef CONFIG_PAX_ASLR
36294 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
36295 + pax_flags |= MF_PAX_RANDMMAP;
36296 +#endif
36297 +
36298 + return pax_flags;
36299 +}
36300 +#endif
36301 +
36302 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36303 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
36304 +{
36305 + unsigned long pax_flags = 0UL;
36306 +
36307 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36308 + unsigned long i;
36309 + int found_flags = 0;
36310 +#endif
36311 +
36312 +#ifdef CONFIG_PAX_EI_PAX
36313 + pax_flags = pax_parse_ei_pax(elf_ex);
36314 +#endif
36315 +
36316 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36317 + for (i = 0UL; i < elf_ex->e_phnum; i++)
36318 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
36319 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
36320 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
36321 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
36322 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
36323 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
36324 + return -EINVAL;
36325 +
36326 +#ifdef CONFIG_PAX_SOFTMODE
36327 + if (pax_softmode)
36328 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
36329 + else
36330 +#endif
36331 +
36332 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
36333 + found_flags = 1;
36334 + break;
36335 + }
36336 +#endif
36337 +
36338 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
36339 + if (found_flags == 0) {
36340 + struct elf_phdr phdr;
36341 + memset(&phdr, 0, sizeof(phdr));
36342 + phdr.p_flags = PF_NOEMUTRAMP;
36343 +#ifdef CONFIG_PAX_SOFTMODE
36344 + if (pax_softmode)
36345 + pax_flags = pax_parse_softmode(&phdr);
36346 + else
36347 +#endif
36348 + pax_flags = pax_parse_hardmode(&phdr);
36349 + }
36350 +#endif
36351 +
36352 +
36353 + if (0 > pax_check_flags(&pax_flags))
36354 + return -EINVAL;
36355 +
36356 + current->mm->pax_flags = pax_flags;
36357 + return 0;
36358 +}
36359 +#endif
36360 +
36361 /*
36362 * These are the functions used to load ELF style executables and shared
36363 * libraries. There is no binary dependent code anywhere else.
36364 @@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
36365 {
36366 unsigned int random_variable = 0;
36367
36368 +#ifdef CONFIG_PAX_RANDUSTACK
36369 + if (randomize_va_space)
36370 + return stack_top - current->mm->delta_stack;
36371 +#endif
36372 +
36373 if ((current->flags & PF_RANDOMIZE) &&
36374 !(current->personality & ADDR_NO_RANDOMIZE)) {
36375 random_variable = get_random_int() & STACK_RND_MASK;
36376 @@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
36377 unsigned long load_addr = 0, load_bias = 0;
36378 int load_addr_set = 0;
36379 char * elf_interpreter = NULL;
36380 - unsigned long error;
36381 + unsigned long error = 0;
36382 struct elf_phdr *elf_ppnt, *elf_phdata;
36383 unsigned long elf_bss, elf_brk;
36384 int retval, i;
36385 @@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
36386 unsigned long start_code, end_code, start_data, end_data;
36387 unsigned long reloc_func_desc = 0;
36388 int executable_stack = EXSTACK_DEFAULT;
36389 - unsigned long def_flags = 0;
36390 struct {
36391 struct elfhdr elf_ex;
36392 struct elfhdr interp_elf_ex;
36393 } *loc;
36394 + unsigned long pax_task_size = TASK_SIZE;
36395
36396 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
36397 if (!loc) {
36398 @@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
36399
36400 /* OK, This is the point of no return */
36401 current->flags &= ~PF_FORKNOEXEC;
36402 - current->mm->def_flags = def_flags;
36403 +
36404 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36405 + current->mm->pax_flags = 0UL;
36406 +#endif
36407 +
36408 +#ifdef CONFIG_PAX_DLRESOLVE
36409 + current->mm->call_dl_resolve = 0UL;
36410 +#endif
36411 +
36412 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
36413 + current->mm->call_syscall = 0UL;
36414 +#endif
36415 +
36416 +#ifdef CONFIG_PAX_ASLR
36417 + current->mm->delta_mmap = 0UL;
36418 + current->mm->delta_stack = 0UL;
36419 +#endif
36420 +
36421 + current->mm->def_flags = 0;
36422 +
36423 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36424 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
36425 + send_sig(SIGKILL, current, 0);
36426 + goto out_free_dentry;
36427 + }
36428 +#endif
36429 +
36430 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
36431 + pax_set_initial_flags(bprm);
36432 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
36433 + if (pax_set_initial_flags_func)
36434 + (pax_set_initial_flags_func)(bprm);
36435 +#endif
36436 +
36437 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
36438 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
36439 + current->mm->context.user_cs_limit = PAGE_SIZE;
36440 + current->mm->def_flags |= VM_PAGEEXEC;
36441 + }
36442 +#endif
36443 +
36444 +#ifdef CONFIG_PAX_SEGMEXEC
36445 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
36446 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
36447 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
36448 + pax_task_size = SEGMEXEC_TASK_SIZE;
36449 + }
36450 +#endif
36451 +
36452 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
36453 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36454 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
36455 + put_cpu();
36456 + }
36457 +#endif
36458
36459 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
36460 may depend on the personality. */
36461 SET_PERSONALITY(loc->elf_ex);
36462 +
36463 +#ifdef CONFIG_PAX_ASLR
36464 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
36465 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
36466 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
36467 + }
36468 +#endif
36469 +
36470 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
36471 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36472 + executable_stack = EXSTACK_DISABLE_X;
36473 + current->personality &= ~READ_IMPLIES_EXEC;
36474 + } else
36475 +#endif
36476 +
36477 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
36478 current->personality |= READ_IMPLIES_EXEC;
36479
36480 @@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
36481 #else
36482 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
36483 #endif
36484 +
36485 +#ifdef CONFIG_PAX_RANDMMAP
36486 + /* PaX: randomize base address at the default exe base if requested */
36487 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
36488 +#ifdef CONFIG_SPARC64
36489 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
36490 +#else
36491 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
36492 +#endif
36493 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
36494 + elf_flags |= MAP_FIXED;
36495 + }
36496 +#endif
36497 +
36498 }
36499
36500 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
36501 @@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
36502 * allowed task size. Note that p_filesz must always be
36503 * <= p_memsz so it is only necessary to check p_memsz.
36504 */
36505 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
36506 - elf_ppnt->p_memsz > TASK_SIZE ||
36507 - TASK_SIZE - elf_ppnt->p_memsz < k) {
36508 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
36509 + elf_ppnt->p_memsz > pax_task_size ||
36510 + pax_task_size - elf_ppnt->p_memsz < k) {
36511 /* set_brk can never work. Avoid overflows. */
36512 send_sig(SIGKILL, current, 0);
36513 retval = -EINVAL;
36514 @@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
36515 start_data += load_bias;
36516 end_data += load_bias;
36517
36518 +#ifdef CONFIG_PAX_RANDMMAP
36519 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
36520 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
36521 +#endif
36522 +
36523 /* Calling set_brk effectively mmaps the pages that we need
36524 * for the bss and break sections. We must do this before
36525 * mapping in the interpreter, to make sure it doesn't wind
36526 @@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
36527 goto out_free_dentry;
36528 }
36529 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
36530 - send_sig(SIGSEGV, current, 0);
36531 - retval = -EFAULT; /* Nobody gets to see this, but.. */
36532 - goto out_free_dentry;
36533 + /*
36534 + * This bss-zeroing can fail if the ELF
36535 + * file specifies odd protections. So
36536 + * we don't check the return value
36537 + */
36538 }
36539
36540 if (elf_interpreter) {
36541 @@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
36542 unsigned long n = off;
36543 if (n > PAGE_SIZE)
36544 n = PAGE_SIZE;
36545 - if (!dump_write(file, buf, n))
36546 + if (!dump_write(file, buf, n)) {
36547 + free_page((unsigned long)buf);
36548 return 0;
36549 + }
36550 off -= n;
36551 }
36552 free_page((unsigned long)buf);
36553 @@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
36554 * Decide what to dump of a segment, part, all or none.
36555 */
36556 static unsigned long vma_dump_size(struct vm_area_struct *vma,
36557 - unsigned long mm_flags)
36558 + unsigned long mm_flags, long signr)
36559 {
36560 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
36561
36562 @@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
36563 if (vma->vm_file == NULL)
36564 return 0;
36565
36566 - if (FILTER(MAPPED_PRIVATE))
36567 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
36568 goto whole;
36569
36570 /*
36571 @@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
36572 #undef DUMP_WRITE
36573
36574 #define DUMP_WRITE(addr, nr) \
36575 + do { \
36576 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
36577 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
36578 - goto end_coredump;
36579 + goto end_coredump; \
36580 + } while (0);
36581
36582 static void fill_elf_header(struct elfhdr *elf, int segs,
36583 u16 machine, u32 flags, u8 osabi)
36584 @@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
36585 {
36586 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
36587 int i = 0;
36588 - do
36589 + do {
36590 i += 2;
36591 - while (auxv[i - 2] != AT_NULL);
36592 + } while (auxv[i - 2] != AT_NULL);
36593 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
36594 }
36595
36596 @@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
36597 phdr.p_offset = offset;
36598 phdr.p_vaddr = vma->vm_start;
36599 phdr.p_paddr = 0;
36600 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
36601 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
36602 phdr.p_memsz = vma->vm_end - vma->vm_start;
36603 offset += phdr.p_filesz;
36604 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
36605 @@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
36606 unsigned long addr;
36607 unsigned long end;
36608
36609 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
36610 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
36611
36612 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
36613 struct page *page;
36614 @@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
36615 page = get_dump_page(addr);
36616 if (page) {
36617 void *kaddr = kmap(page);
36618 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
36619 stop = ((size += PAGE_SIZE) > limit) ||
36620 !dump_write(file, kaddr, PAGE_SIZE);
36621 kunmap(page);
36622 @@ -2042,6 +2356,97 @@ out:
36623
36624 #endif /* USE_ELF_CORE_DUMP */
36625
36626 +#ifdef CONFIG_PAX_MPROTECT
36627 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
36628 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
36629 + * we'll remove VM_MAYWRITE for good on RELRO segments.
36630 + *
36631 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
36632 + * basis because we want to allow the common case and not the special ones.
36633 + */
36634 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
36635 +{
36636 + struct elfhdr elf_h;
36637 + struct elf_phdr elf_p;
36638 + unsigned long i;
36639 + unsigned long oldflags;
36640 + bool is_textrel_rw, is_textrel_rx, is_relro;
36641 +
36642 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
36643 + return;
36644 +
36645 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
36646 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
36647 +
36648 +#ifdef CONFIG_PAX_ELFRELOCS
36649 + /* possible TEXTREL */
36650 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
36651 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
36652 +#else
36653 + is_textrel_rw = false;
36654 + is_textrel_rx = false;
36655 +#endif
36656 +
36657 + /* possible RELRO */
36658 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
36659 +
36660 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
36661 + return;
36662 +
36663 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
36664 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
36665 +
36666 +#ifdef CONFIG_PAX_ETEXECRELOCS
36667 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
36668 +#else
36669 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
36670 +#endif
36671 +
36672 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
36673 + !elf_check_arch(&elf_h) ||
36674 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
36675 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
36676 + return;
36677 +
36678 + for (i = 0UL; i < elf_h.e_phnum; i++) {
36679 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
36680 + return;
36681 + switch (elf_p.p_type) {
36682 + case PT_DYNAMIC:
36683 + if (!is_textrel_rw && !is_textrel_rx)
36684 + continue;
36685 + i = 0UL;
36686 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
36687 + elf_dyn dyn;
36688 +
36689 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
36690 + return;
36691 + if (dyn.d_tag == DT_NULL)
36692 + return;
36693 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
36694 + gr_log_textrel(vma);
36695 + if (is_textrel_rw)
36696 + vma->vm_flags |= VM_MAYWRITE;
36697 + else
36698 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
36699 + vma->vm_flags &= ~VM_MAYWRITE;
36700 + return;
36701 + }
36702 + i++;
36703 + }
36704 + return;
36705 +
36706 + case PT_GNU_RELRO:
36707 + if (!is_relro)
36708 + continue;
36709 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
36710 + vma->vm_flags &= ~VM_MAYWRITE;
36711 + return;
36712 + }
36713 + }
36714 +}
36715 +#endif
36716 +
36717 static int __init init_elf_binfmt(void)
36718 {
36719 return register_binfmt(&elf_format);
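The PT_PAX_FLAGS parsing introduced above encodes two conventions for the same program-header bits: under softmode a feature is enabled only if its positive flag (PF_PAGEEXEC, PF_MPROTECT, ...) is set, under hardmode it is enabled unless the negative counterpart (PF_NOPAGEEXEC, PF_NOMPROTECT, ...) is set, and a header carrying both a flag and its negation is rejected outright. A condensed sketch of that decision for a single feature, using the flag and MF_PAX_* names from the hunks; the function name and the softmode parameter are invented for illustration:

static long example_parse_pax(const struct elf_phdr *phdr, int softmode)
{
	unsigned long flags = 0UL;

	/* contradictory markings are rejected, as in pax_parse_elf_flags() */
	if ((phdr->p_flags & PF_PAGEEXEC) && (phdr->p_flags & PF_NOPAGEEXEC))
		return -EINVAL;

	if (softmode) {
		/* softmode: off by default, the binary opts in */
		if (phdr->p_flags & PF_PAGEEXEC)
			flags |= MF_PAX_PAGEEXEC;
	} else {
		/* hardmode: on by default, the binary opts out */
		if (!(phdr->p_flags & PF_NOPAGEEXEC))
			flags |= MF_PAX_PAGEEXEC;
	}

	return flags;
}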
36720 diff -urNp linux-2.6.32.42/fs/binfmt_flat.c linux-2.6.32.42/fs/binfmt_flat.c
36721 --- linux-2.6.32.42/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
36722 +++ linux-2.6.32.42/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
36723 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
36724 realdatastart = (unsigned long) -ENOMEM;
36725 printk("Unable to allocate RAM for process data, errno %d\n",
36726 (int)-realdatastart);
36727 + down_write(&current->mm->mmap_sem);
36728 do_munmap(current->mm, textpos, text_len);
36729 + up_write(&current->mm->mmap_sem);
36730 ret = realdatastart;
36731 goto err;
36732 }
36733 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
36734 }
36735 if (IS_ERR_VALUE(result)) {
36736 printk("Unable to read data+bss, errno %d\n", (int)-result);
36737 + down_write(&current->mm->mmap_sem);
36738 do_munmap(current->mm, textpos, text_len);
36739 do_munmap(current->mm, realdatastart, data_len + extra);
36740 + up_write(&current->mm->mmap_sem);
36741 ret = result;
36742 goto err;
36743 }
36744 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
36745 }
36746 if (IS_ERR_VALUE(result)) {
36747 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
36748 + down_write(&current->mm->mmap_sem);
36749 do_munmap(current->mm, textpos, text_len + data_len + extra +
36750 MAX_SHARED_LIBS * sizeof(unsigned long));
36751 + up_write(&current->mm->mmap_sem);
36752 ret = result;
36753 goto err;
36754 }
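The three binfmt_flat.c hunks above fix the same omission: in this kernel, do_munmap() expects its caller to hold the mm's mmap_sem for writing, and the load_flat_file() error paths were calling it unlocked. The corrected pattern, as applied by each hunk:

	down_write(&current->mm->mmap_sem);
	do_munmap(current->mm, textpos, text_len);
	up_write(&current->mm->mmap_sem);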
36755 diff -urNp linux-2.6.32.42/fs/bio.c linux-2.6.32.42/fs/bio.c
36756 --- linux-2.6.32.42/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
36757 +++ linux-2.6.32.42/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
36758 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
36759
36760 i = 0;
36761 while (i < bio_slab_nr) {
36762 - struct bio_slab *bslab = &bio_slabs[i];
36763 + bslab = &bio_slabs[i];
36764
36765 if (!bslab->slab && entry == -1)
36766 entry = i;
36767 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
36768 const int read = bio_data_dir(bio) == READ;
36769 struct bio_map_data *bmd = bio->bi_private;
36770 int i;
36771 - char *p = bmd->sgvecs[0].iov_base;
36772 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
36773
36774 __bio_for_each_segment(bvec, bio, i, 0) {
36775 char *addr = page_address(bvec->bv_page);
36776 diff -urNp linux-2.6.32.42/fs/block_dev.c linux-2.6.32.42/fs/block_dev.c
36777 --- linux-2.6.32.42/fs/block_dev.c 2011-06-25 12:55:34.000000000 -0400
36778 +++ linux-2.6.32.42/fs/block_dev.c 2011-06-25 12:56:37.000000000 -0400
36779 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
36780 else if (bdev->bd_contains == bdev)
36781 res = 0; /* is a whole device which isn't held */
36782
36783 - else if (bdev->bd_contains->bd_holder == bd_claim)
36784 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
36785 res = 0; /* is a partition of a device that is being partitioned */
36786 else if (bdev->bd_contains->bd_holder != NULL)
36787 res = -EBUSY; /* is a partition of a held device */
36788 diff -urNp linux-2.6.32.42/fs/btrfs/ctree.c linux-2.6.32.42/fs/btrfs/ctree.c
36789 --- linux-2.6.32.42/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
36790 +++ linux-2.6.32.42/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
36791 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
36792 free_extent_buffer(buf);
36793 add_root_to_dirty_list(root);
36794 } else {
36795 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
36796 - parent_start = parent->start;
36797 - else
36798 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
36799 + if (parent)
36800 + parent_start = parent->start;
36801 + else
36802 + parent_start = 0;
36803 + } else
36804 parent_start = 0;
36805
36806 WARN_ON(trans->transid != btrfs_header_generation(parent));
36807 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
36808
36809 ret = 0;
36810 if (slot == 0) {
36811 - struct btrfs_disk_key disk_key;
36812 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
36813 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
36814 }
36815 diff -urNp linux-2.6.32.42/fs/btrfs/disk-io.c linux-2.6.32.42/fs/btrfs/disk-io.c
36816 --- linux-2.6.32.42/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
36817 +++ linux-2.6.32.42/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
36818 @@ -39,7 +39,7 @@
36819 #include "tree-log.h"
36820 #include "free-space-cache.h"
36821
36822 -static struct extent_io_ops btree_extent_io_ops;
36823 +static const struct extent_io_ops btree_extent_io_ops;
36824 static void end_workqueue_fn(struct btrfs_work *work);
36825 static void free_fs_root(struct btrfs_root *root);
36826
36827 @@ -2607,7 +2607,7 @@ out:
36828 return 0;
36829 }
36830
36831 -static struct extent_io_ops btree_extent_io_ops = {
36832 +static const struct extent_io_ops btree_extent_io_ops = {
36833 .write_cache_pages_lock_hook = btree_lock_page_hook,
36834 .readpage_end_io_hook = btree_readpage_end_io_hook,
36835 .submit_bio_hook = btree_submit_bio_hook,
36836 diff -urNp linux-2.6.32.42/fs/btrfs/extent_io.h linux-2.6.32.42/fs/btrfs/extent_io.h
36837 --- linux-2.6.32.42/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
36838 +++ linux-2.6.32.42/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
36839 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
36840 struct bio *bio, int mirror_num,
36841 unsigned long bio_flags);
36842 struct extent_io_ops {
36843 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
36844 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
36845 u64 start, u64 end, int *page_started,
36846 unsigned long *nr_written);
36847 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
36848 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
36849 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
36850 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
36851 extent_submit_bio_hook_t *submit_bio_hook;
36852 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
36853 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
36854 size_t size, struct bio *bio,
36855 unsigned long bio_flags);
36856 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
36857 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
36858 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
36859 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
36860 u64 start, u64 end,
36861 struct extent_state *state);
36862 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
36863 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
36864 u64 start, u64 end,
36865 struct extent_state *state);
36866 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
36867 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
36868 struct extent_state *state);
36869 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
36870 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
36871 struct extent_state *state, int uptodate);
36872 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
36873 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
36874 unsigned long old, unsigned long bits);
36875 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
36876 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
36877 unsigned long bits);
36878 - int (*merge_extent_hook)(struct inode *inode,
36879 + int (* const merge_extent_hook)(struct inode *inode,
36880 struct extent_state *new,
36881 struct extent_state *other);
36882 - int (*split_extent_hook)(struct inode *inode,
36883 + int (* const split_extent_hook)(struct inode *inode,
36884 struct extent_state *orig, u64 split);
36885 - int (*write_cache_pages_lock_hook)(struct page *page);
36886 + int (* const write_cache_pages_lock_hook)(struct page *page);
36887 };
36888
36889 struct extent_io_tree {
36890 @@ -88,7 +88,7 @@ struct extent_io_tree {
36891 u64 dirty_bytes;
36892 spinlock_t lock;
36893 spinlock_t buffer_lock;
36894 - struct extent_io_ops *ops;
36895 + const struct extent_io_ops *ops;
36896 };
36897
36898 struct extent_state {
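
The extent_io.h and disk-io.c hunks above are part of grsecurity's structure "constification": every function pointer in struct extent_io_ops becomes (* const ...) and the ops instances become static const, so the whole table can live in read-only memory and the hooks cannot later be redirected. A small self-contained sketch of the same pattern (generic C, not the btrfs code):

#include <stdio.h>

/* An ops table whose members are const function pointers; a static const
 * instance of it can be placed in .rodata, so the hooks cannot be
 * overwritten at run time (and any attempt to assign to them fails to
 * compile). */
struct io_ops {
        int (* const readpage_hook)(int page);
        int (* const writepage_hook)(int page);
};

static int my_readpage(int page)  { return page + 1; }
static int my_writepage(int page) { return page + 2; }

static const struct io_ops my_ops = {
        .readpage_hook  = my_readpage,
        .writepage_hook = my_writepage,
};

int main(void)
{
        /* Calls still go through the table as before... */
        printf("read  -> %d\n", my_ops.readpage_hook(10));
        printf("write -> %d\n", my_ops.writepage_hook(10));
        /* ...but `my_ops.readpage_hook = other_fn;` would be a compile error. */
        return 0;
}
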
36899 diff -urNp linux-2.6.32.42/fs/btrfs/extent-tree.c linux-2.6.32.42/fs/btrfs/extent-tree.c
36900 --- linux-2.6.32.42/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
36901 +++ linux-2.6.32.42/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
36902 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
36903 u64 group_start = group->key.objectid;
36904 new_extents = kmalloc(sizeof(*new_extents),
36905 GFP_NOFS);
36906 + if (!new_extents) {
36907 + ret = -ENOMEM;
36908 + goto out;
36909 + }
36910 nr_extents = 1;
36911 ret = get_new_locations(reloc_inode,
36912 extent_key,
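
The extent-tree.c hunk above adds the missing allocation-failure check after kmalloc(): on failure it sets ret = -ENOMEM and jumps to the common cleanup label instead of dereferencing a NULL pointer. A minimal userspace sketch of the same check-and-goto-cleanup idiom (plain malloc() here, not the kernel allocator):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int relocate_one(size_t nr)
{
        int ret = 0;
        int *new_extents = malloc(nr * sizeof(*new_extents));

        if (!new_extents) {
                ret = -ENOMEM;          /* same shape as the added hunk */
                goto out;
        }
        memset(new_extents, 0, nr * sizeof(*new_extents));
        /* ... work with new_extents ... */
        free(new_extents);
out:
        return ret;
}

int main(void)
{
        printf("relocate_one -> %d\n", relocate_one(16));
        return 0;
}
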
36913 diff -urNp linux-2.6.32.42/fs/btrfs/free-space-cache.c linux-2.6.32.42/fs/btrfs/free-space-cache.c
36914 --- linux-2.6.32.42/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
36915 +++ linux-2.6.32.42/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
36916 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
36917
36918 while(1) {
36919 if (entry->bytes < bytes || entry->offset < min_start) {
36920 - struct rb_node *node;
36921 -
36922 node = rb_next(&entry->offset_index);
36923 if (!node)
36924 break;
36925 @@ -1226,7 +1224,7 @@ again:
36926 */
36927 while (entry->bitmap || found_bitmap ||
36928 (!entry->bitmap && entry->bytes < min_bytes)) {
36929 - struct rb_node *node = rb_next(&entry->offset_index);
36930 + node = rb_next(&entry->offset_index);
36931
36932 if (entry->bitmap && entry->bytes > bytes + empty_size) {
36933 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
36934 diff -urNp linux-2.6.32.42/fs/btrfs/inode.c linux-2.6.32.42/fs/btrfs/inode.c
36935 --- linux-2.6.32.42/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
36936 +++ linux-2.6.32.42/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
36937 @@ -63,7 +63,7 @@ static const struct inode_operations btr
36938 static const struct address_space_operations btrfs_aops;
36939 static const struct address_space_operations btrfs_symlink_aops;
36940 static const struct file_operations btrfs_dir_file_operations;
36941 -static struct extent_io_ops btrfs_extent_io_ops;
36942 +static const struct extent_io_ops btrfs_extent_io_ops;
36943
36944 static struct kmem_cache *btrfs_inode_cachep;
36945 struct kmem_cache *btrfs_trans_handle_cachep;
36946 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
36947 1, 0, NULL, GFP_NOFS);
36948 while (start < end) {
36949 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
36950 + BUG_ON(!async_cow);
36951 async_cow->inode = inode;
36952 async_cow->root = root;
36953 async_cow->locked_page = locked_page;
36954 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
36955 inline_size = btrfs_file_extent_inline_item_len(leaf,
36956 btrfs_item_nr(leaf, path->slots[0]));
36957 tmp = kmalloc(inline_size, GFP_NOFS);
36958 + if (!tmp)
36959 + return -ENOMEM;
36960 ptr = btrfs_file_extent_inline_start(item);
36961
36962 read_extent_buffer(leaf, tmp, ptr, inline_size);
36963 @@ -5410,7 +5413,7 @@ fail:
36964 return -ENOMEM;
36965 }
36966
36967 -static int btrfs_getattr(struct vfsmount *mnt,
36968 +int btrfs_getattr(struct vfsmount *mnt,
36969 struct dentry *dentry, struct kstat *stat)
36970 {
36971 struct inode *inode = dentry->d_inode;
36972 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
36973 return 0;
36974 }
36975
36976 +EXPORT_SYMBOL(btrfs_getattr);
36977 +
36978 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
36979 +{
36980 + return BTRFS_I(inode)->root->anon_super.s_dev;
36981 +}
36982 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
36983 +
36984 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
36985 struct inode *new_dir, struct dentry *new_dentry)
36986 {
36987 @@ -5972,7 +5983,7 @@ static const struct file_operations btrf
36988 .fsync = btrfs_sync_file,
36989 };
36990
36991 -static struct extent_io_ops btrfs_extent_io_ops = {
36992 +static const struct extent_io_ops btrfs_extent_io_ops = {
36993 .fill_delalloc = run_delalloc_range,
36994 .submit_bio_hook = btrfs_submit_bio_hook,
36995 .merge_bio_hook = btrfs_merge_bio_hook,
36996 diff -urNp linux-2.6.32.42/fs/btrfs/relocation.c linux-2.6.32.42/fs/btrfs/relocation.c
36997 --- linux-2.6.32.42/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
36998 +++ linux-2.6.32.42/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
36999 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
37000 }
37001 spin_unlock(&rc->reloc_root_tree.lock);
37002
37003 - BUG_ON((struct btrfs_root *)node->data != root);
37004 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
37005
37006 if (!del) {
37007 spin_lock(&rc->reloc_root_tree.lock);
37008 diff -urNp linux-2.6.32.42/fs/btrfs/sysfs.c linux-2.6.32.42/fs/btrfs/sysfs.c
37009 --- linux-2.6.32.42/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
37010 +++ linux-2.6.32.42/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
37011 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
37012 complete(&root->kobj_unregister);
37013 }
37014
37015 -static struct sysfs_ops btrfs_super_attr_ops = {
37016 +static const struct sysfs_ops btrfs_super_attr_ops = {
37017 .show = btrfs_super_attr_show,
37018 .store = btrfs_super_attr_store,
37019 };
37020
37021 -static struct sysfs_ops btrfs_root_attr_ops = {
37022 +static const struct sysfs_ops btrfs_root_attr_ops = {
37023 .show = btrfs_root_attr_show,
37024 .store = btrfs_root_attr_store,
37025 };
37026 diff -urNp linux-2.6.32.42/fs/buffer.c linux-2.6.32.42/fs/buffer.c
37027 --- linux-2.6.32.42/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
37028 +++ linux-2.6.32.42/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
37029 @@ -25,6 +25,7 @@
37030 #include <linux/percpu.h>
37031 #include <linux/slab.h>
37032 #include <linux/capability.h>
37033 +#include <linux/security.h>
37034 #include <linux/blkdev.h>
37035 #include <linux/file.h>
37036 #include <linux/quotaops.h>
37037 diff -urNp linux-2.6.32.42/fs/cachefiles/bind.c linux-2.6.32.42/fs/cachefiles/bind.c
37038 --- linux-2.6.32.42/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
37039 +++ linux-2.6.32.42/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
37040 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
37041 args);
37042
37043 /* start by checking things over */
37044 - ASSERT(cache->fstop_percent >= 0 &&
37045 - cache->fstop_percent < cache->fcull_percent &&
37046 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
37047 cache->fcull_percent < cache->frun_percent &&
37048 cache->frun_percent < 100);
37049
37050 - ASSERT(cache->bstop_percent >= 0 &&
37051 - cache->bstop_percent < cache->bcull_percent &&
37052 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
37053 cache->bcull_percent < cache->brun_percent &&
37054 cache->brun_percent < 100);
37055
37056 diff -urNp linux-2.6.32.42/fs/cachefiles/daemon.c linux-2.6.32.42/fs/cachefiles/daemon.c
37057 --- linux-2.6.32.42/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
37058 +++ linux-2.6.32.42/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
37059 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
37060 if (test_bit(CACHEFILES_DEAD, &cache->flags))
37061 return -EIO;
37062
37063 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
37064 + if (datalen > PAGE_SIZE - 1)
37065 return -EOPNOTSUPP;
37066
37067 /* drag the command string into the kernel so we can parse it */
37068 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
37069 if (args[0] != '%' || args[1] != '\0')
37070 return -EINVAL;
37071
37072 - if (fstop < 0 || fstop >= cache->fcull_percent)
37073 + if (fstop >= cache->fcull_percent)
37074 return cachefiles_daemon_range_error(cache, args);
37075
37076 cache->fstop_percent = fstop;
37077 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
37078 if (args[0] != '%' || args[1] != '\0')
37079 return -EINVAL;
37080
37081 - if (bstop < 0 || bstop >= cache->bcull_percent)
37082 + if (bstop >= cache->bcull_percent)
37083 return cachefiles_daemon_range_error(cache, args);
37084
37085 cache->bstop_percent = bstop;
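
The cachefiles/bind.c and cachefiles/daemon.c hunks above drop the "x < 0" halves of several range checks; presumably those fields and locals are unsigned, so the lower-bound comparison is always false and only the upper-bound test is meaningful. A tiny sketch of why such a check is dead code (assuming an unsigned type, which is what the hunks suggest):

#include <stdio.h>

int main(void)
{
        size_t datalen = 0;     /* unsigned, like a length or percentage field */

        /* "datalen < 0" can never be true for an unsigned type; compilers
         * warn about it with -Wtype-limits, and the patch simply removes
         * the dead half of the condition. */
        if (datalen < 0)
                puts("unreachable");
        else
                puts("the lower-bound test on an unsigned value is dead code");
        return 0;
}
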
37086 diff -urNp linux-2.6.32.42/fs/cachefiles/internal.h linux-2.6.32.42/fs/cachefiles/internal.h
37087 --- linux-2.6.32.42/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
37088 +++ linux-2.6.32.42/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
37089 @@ -56,7 +56,7 @@ struct cachefiles_cache {
37090 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
37091 struct rb_root active_nodes; /* active nodes (can't be culled) */
37092 rwlock_t active_lock; /* lock for active_nodes */
37093 - atomic_t gravecounter; /* graveyard uniquifier */
37094 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
37095 unsigned frun_percent; /* when to stop culling (% files) */
37096 unsigned fcull_percent; /* when to start culling (% files) */
37097 unsigned fstop_percent; /* when to stop allocating (% files) */
37098 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
37099 * proc.c
37100 */
37101 #ifdef CONFIG_CACHEFILES_HISTOGRAM
37102 -extern atomic_t cachefiles_lookup_histogram[HZ];
37103 -extern atomic_t cachefiles_mkdir_histogram[HZ];
37104 -extern atomic_t cachefiles_create_histogram[HZ];
37105 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37106 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37107 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
37108
37109 extern int __init cachefiles_proc_init(void);
37110 extern void cachefiles_proc_cleanup(void);
37111 static inline
37112 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
37113 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
37114 {
37115 unsigned long jif = jiffies - start_jif;
37116 if (jif >= HZ)
37117 jif = HZ - 1;
37118 - atomic_inc(&histogram[jif]);
37119 + atomic_inc_unchecked(&histogram[jif]);
37120 }
37121
37122 #else
37123 diff -urNp linux-2.6.32.42/fs/cachefiles/namei.c linux-2.6.32.42/fs/cachefiles/namei.c
37124 --- linux-2.6.32.42/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
37125 +++ linux-2.6.32.42/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
37126 @@ -250,7 +250,7 @@ try_again:
37127 /* first step is to make up a grave dentry in the graveyard */
37128 sprintf(nbuffer, "%08x%08x",
37129 (uint32_t) get_seconds(),
37130 - (uint32_t) atomic_inc_return(&cache->gravecounter));
37131 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
37132
37133 /* do the multiway lock magic */
37134 trap = lock_rename(cache->graveyard, dir);
37135 diff -urNp linux-2.6.32.42/fs/cachefiles/proc.c linux-2.6.32.42/fs/cachefiles/proc.c
37136 --- linux-2.6.32.42/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
37137 +++ linux-2.6.32.42/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
37138 @@ -14,9 +14,9 @@
37139 #include <linux/seq_file.h>
37140 #include "internal.h"
37141
37142 -atomic_t cachefiles_lookup_histogram[HZ];
37143 -atomic_t cachefiles_mkdir_histogram[HZ];
37144 -atomic_t cachefiles_create_histogram[HZ];
37145 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37146 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37147 +atomic_unchecked_t cachefiles_create_histogram[HZ];
37148
37149 /*
37150 * display the latency histogram
37151 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
37152 return 0;
37153 default:
37154 index = (unsigned long) v - 3;
37155 - x = atomic_read(&cachefiles_lookup_histogram[index]);
37156 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
37157 - z = atomic_read(&cachefiles_create_histogram[index]);
37158 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
37159 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
37160 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
37161 if (x == 0 && y == 0 && z == 0)
37162 return 0;
37163
37164 diff -urNp linux-2.6.32.42/fs/cachefiles/rdwr.c linux-2.6.32.42/fs/cachefiles/rdwr.c
37165 --- linux-2.6.32.42/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
37166 +++ linux-2.6.32.42/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
37167 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
37168 old_fs = get_fs();
37169 set_fs(KERNEL_DS);
37170 ret = file->f_op->write(
37171 - file, (const void __user *) data, len, &pos);
37172 + file, (__force const void __user *) data, len, &pos);
37173 set_fs(old_fs);
37174 kunmap(page);
37175 if (ret != len)
37176 diff -urNp linux-2.6.32.42/fs/cifs/cifs_debug.c linux-2.6.32.42/fs/cifs/cifs_debug.c
37177 --- linux-2.6.32.42/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
37178 +++ linux-2.6.32.42/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
37179 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
37180 tcon = list_entry(tmp3,
37181 struct cifsTconInfo,
37182 tcon_list);
37183 - atomic_set(&tcon->num_smbs_sent, 0);
37184 - atomic_set(&tcon->num_writes, 0);
37185 - atomic_set(&tcon->num_reads, 0);
37186 - atomic_set(&tcon->num_oplock_brks, 0);
37187 - atomic_set(&tcon->num_opens, 0);
37188 - atomic_set(&tcon->num_posixopens, 0);
37189 - atomic_set(&tcon->num_posixmkdirs, 0);
37190 - atomic_set(&tcon->num_closes, 0);
37191 - atomic_set(&tcon->num_deletes, 0);
37192 - atomic_set(&tcon->num_mkdirs, 0);
37193 - atomic_set(&tcon->num_rmdirs, 0);
37194 - atomic_set(&tcon->num_renames, 0);
37195 - atomic_set(&tcon->num_t2renames, 0);
37196 - atomic_set(&tcon->num_ffirst, 0);
37197 - atomic_set(&tcon->num_fnext, 0);
37198 - atomic_set(&tcon->num_fclose, 0);
37199 - atomic_set(&tcon->num_hardlinks, 0);
37200 - atomic_set(&tcon->num_symlinks, 0);
37201 - atomic_set(&tcon->num_locks, 0);
37202 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
37203 + atomic_set_unchecked(&tcon->num_writes, 0);
37204 + atomic_set_unchecked(&tcon->num_reads, 0);
37205 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
37206 + atomic_set_unchecked(&tcon->num_opens, 0);
37207 + atomic_set_unchecked(&tcon->num_posixopens, 0);
37208 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
37209 + atomic_set_unchecked(&tcon->num_closes, 0);
37210 + atomic_set_unchecked(&tcon->num_deletes, 0);
37211 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
37212 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
37213 + atomic_set_unchecked(&tcon->num_renames, 0);
37214 + atomic_set_unchecked(&tcon->num_t2renames, 0);
37215 + atomic_set_unchecked(&tcon->num_ffirst, 0);
37216 + atomic_set_unchecked(&tcon->num_fnext, 0);
37217 + atomic_set_unchecked(&tcon->num_fclose, 0);
37218 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
37219 + atomic_set_unchecked(&tcon->num_symlinks, 0);
37220 + atomic_set_unchecked(&tcon->num_locks, 0);
37221 }
37222 }
37223 }
37224 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
37225 if (tcon->need_reconnect)
37226 seq_puts(m, "\tDISCONNECTED ");
37227 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
37228 - atomic_read(&tcon->num_smbs_sent),
37229 - atomic_read(&tcon->num_oplock_brks));
37230 + atomic_read_unchecked(&tcon->num_smbs_sent),
37231 + atomic_read_unchecked(&tcon->num_oplock_brks));
37232 seq_printf(m, "\nReads: %d Bytes: %lld",
37233 - atomic_read(&tcon->num_reads),
37234 + atomic_read_unchecked(&tcon->num_reads),
37235 (long long)(tcon->bytes_read));
37236 seq_printf(m, "\nWrites: %d Bytes: %lld",
37237 - atomic_read(&tcon->num_writes),
37238 + atomic_read_unchecked(&tcon->num_writes),
37239 (long long)(tcon->bytes_written));
37240 seq_printf(m, "\nFlushes: %d",
37241 - atomic_read(&tcon->num_flushes));
37242 + atomic_read_unchecked(&tcon->num_flushes));
37243 seq_printf(m, "\nLocks: %d HardLinks: %d "
37244 "Symlinks: %d",
37245 - atomic_read(&tcon->num_locks),
37246 - atomic_read(&tcon->num_hardlinks),
37247 - atomic_read(&tcon->num_symlinks));
37248 + atomic_read_unchecked(&tcon->num_locks),
37249 + atomic_read_unchecked(&tcon->num_hardlinks),
37250 + atomic_read_unchecked(&tcon->num_symlinks));
37251 seq_printf(m, "\nOpens: %d Closes: %d "
37252 "Deletes: %d",
37253 - atomic_read(&tcon->num_opens),
37254 - atomic_read(&tcon->num_closes),
37255 - atomic_read(&tcon->num_deletes));
37256 + atomic_read_unchecked(&tcon->num_opens),
37257 + atomic_read_unchecked(&tcon->num_closes),
37258 + atomic_read_unchecked(&tcon->num_deletes));
37259 seq_printf(m, "\nPosix Opens: %d "
37260 "Posix Mkdirs: %d",
37261 - atomic_read(&tcon->num_posixopens),
37262 - atomic_read(&tcon->num_posixmkdirs));
37263 + atomic_read_unchecked(&tcon->num_posixopens),
37264 + atomic_read_unchecked(&tcon->num_posixmkdirs));
37265 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
37266 - atomic_read(&tcon->num_mkdirs),
37267 - atomic_read(&tcon->num_rmdirs));
37268 + atomic_read_unchecked(&tcon->num_mkdirs),
37269 + atomic_read_unchecked(&tcon->num_rmdirs));
37270 seq_printf(m, "\nRenames: %d T2 Renames %d",
37271 - atomic_read(&tcon->num_renames),
37272 - atomic_read(&tcon->num_t2renames));
37273 + atomic_read_unchecked(&tcon->num_renames),
37274 + atomic_read_unchecked(&tcon->num_t2renames));
37275 seq_printf(m, "\nFindFirst: %d FNext %d "
37276 "FClose %d",
37277 - atomic_read(&tcon->num_ffirst),
37278 - atomic_read(&tcon->num_fnext),
37279 - atomic_read(&tcon->num_fclose));
37280 + atomic_read_unchecked(&tcon->num_ffirst),
37281 + atomic_read_unchecked(&tcon->num_fnext),
37282 + atomic_read_unchecked(&tcon->num_fclose));
37283 }
37284 }
37285 }
37286 diff -urNp linux-2.6.32.42/fs/cifs/cifsglob.h linux-2.6.32.42/fs/cifs/cifsglob.h
37287 --- linux-2.6.32.42/fs/cifs/cifsglob.h 2011-03-27 14:31:47.000000000 -0400
37288 +++ linux-2.6.32.42/fs/cifs/cifsglob.h 2011-05-04 17:56:28.000000000 -0400
37289 @@ -252,28 +252,28 @@ struct cifsTconInfo {
37290 __u16 Flags; /* optional support bits */
37291 enum statusEnum tidStatus;
37292 #ifdef CONFIG_CIFS_STATS
37293 - atomic_t num_smbs_sent;
37294 - atomic_t num_writes;
37295 - atomic_t num_reads;
37296 - atomic_t num_flushes;
37297 - atomic_t num_oplock_brks;
37298 - atomic_t num_opens;
37299 - atomic_t num_closes;
37300 - atomic_t num_deletes;
37301 - atomic_t num_mkdirs;
37302 - atomic_t num_posixopens;
37303 - atomic_t num_posixmkdirs;
37304 - atomic_t num_rmdirs;
37305 - atomic_t num_renames;
37306 - atomic_t num_t2renames;
37307 - atomic_t num_ffirst;
37308 - atomic_t num_fnext;
37309 - atomic_t num_fclose;
37310 - atomic_t num_hardlinks;
37311 - atomic_t num_symlinks;
37312 - atomic_t num_locks;
37313 - atomic_t num_acl_get;
37314 - atomic_t num_acl_set;
37315 + atomic_unchecked_t num_smbs_sent;
37316 + atomic_unchecked_t num_writes;
37317 + atomic_unchecked_t num_reads;
37318 + atomic_unchecked_t num_flushes;
37319 + atomic_unchecked_t num_oplock_brks;
37320 + atomic_unchecked_t num_opens;
37321 + atomic_unchecked_t num_closes;
37322 + atomic_unchecked_t num_deletes;
37323 + atomic_unchecked_t num_mkdirs;
37324 + atomic_unchecked_t num_posixopens;
37325 + atomic_unchecked_t num_posixmkdirs;
37326 + atomic_unchecked_t num_rmdirs;
37327 + atomic_unchecked_t num_renames;
37328 + atomic_unchecked_t num_t2renames;
37329 + atomic_unchecked_t num_ffirst;
37330 + atomic_unchecked_t num_fnext;
37331 + atomic_unchecked_t num_fclose;
37332 + atomic_unchecked_t num_hardlinks;
37333 + atomic_unchecked_t num_symlinks;
37334 + atomic_unchecked_t num_locks;
37335 + atomic_unchecked_t num_acl_get;
37336 + atomic_unchecked_t num_acl_set;
37337 #ifdef CONFIG_CIFS_STATS2
37338 unsigned long long time_writes;
37339 unsigned long long time_reads;
37340 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
37341 }
37342
37343 #ifdef CONFIG_CIFS_STATS
37344 -#define cifs_stats_inc atomic_inc
37345 +#define cifs_stats_inc atomic_inc_unchecked
37346
37347 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
37348 unsigned int bytes)
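
The cachefiles and cifs hunks above convert pure statistics counters from atomic_t to atomic_unchecked_t (and their accessors to the *_unchecked variants). Under PaX's CONFIG_PAX_REFCOUNT, ordinary atomic_t increments are instrumented to detect overflow, which makes sense for reference counts but not for counters that are allowed to wrap, such as these per-tcon statistics; marking them "unchecked" opts them out of the overflow detector. A rough userspace illustration of the distinction (standard GCC/Clang builtins, not the kernel's implementation):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* "Checked" increment: abort on overflow, the way a protected refcount
 * behaves conceptually under PAX_REFCOUNT. */
static int checked_inc(int *counter)
{
        int next;
        if (__builtin_add_overflow(*counter, 1, &next)) {
                fprintf(stderr, "refcount overflow detected\n");
                abort();
        }
        return *counter = next;
}

/* "Unchecked" increment: wrapping is fine, suitable for statistics. */
static unsigned int unchecked_inc(unsigned int *counter)
{
        return ++*counter;      /* unsigned wrap-around is well defined */
}

int main(void)
{
        unsigned int stats = UINT_MAX;
        int refs = 1;

        printf("stats wraps to %u\n", unchecked_inc(&stats));
        printf("refs becomes %d\n", checked_inc(&refs));
        return 0;
}
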
37349 diff -urNp linux-2.6.32.42/fs/cifs/link.c linux-2.6.32.42/fs/cifs/link.c
37350 --- linux-2.6.32.42/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
37351 +++ linux-2.6.32.42/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
37352 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
37353
37354 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
37355 {
37356 - char *p = nd_get_link(nd);
37357 + const char *p = nd_get_link(nd);
37358 if (!IS_ERR(p))
37359 kfree(p);
37360 }
37361 diff -urNp linux-2.6.32.42/fs/coda/cache.c linux-2.6.32.42/fs/coda/cache.c
37362 --- linux-2.6.32.42/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
37363 +++ linux-2.6.32.42/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
37364 @@ -24,14 +24,14 @@
37365 #include <linux/coda_fs_i.h>
37366 #include <linux/coda_cache.h>
37367
37368 -static atomic_t permission_epoch = ATOMIC_INIT(0);
37369 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
37370
37371 /* replace or extend an acl cache hit */
37372 void coda_cache_enter(struct inode *inode, int mask)
37373 {
37374 struct coda_inode_info *cii = ITOC(inode);
37375
37376 - cii->c_cached_epoch = atomic_read(&permission_epoch);
37377 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
37378 if (cii->c_uid != current_fsuid()) {
37379 cii->c_uid = current_fsuid();
37380 cii->c_cached_perm = mask;
37381 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
37382 void coda_cache_clear_inode(struct inode *inode)
37383 {
37384 struct coda_inode_info *cii = ITOC(inode);
37385 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
37386 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
37387 }
37388
37389 /* remove all acl caches */
37390 void coda_cache_clear_all(struct super_block *sb)
37391 {
37392 - atomic_inc(&permission_epoch);
37393 + atomic_inc_unchecked(&permission_epoch);
37394 }
37395
37396
37397 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
37398
37399 hit = (mask & cii->c_cached_perm) == mask &&
37400 cii->c_uid == current_fsuid() &&
37401 - cii->c_cached_epoch == atomic_read(&permission_epoch);
37402 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
37403
37404 return hit;
37405 }
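
The coda/cache.c hunk above applies the same atomic_unchecked_t treatment to permission_epoch, which implements epoch-based cache invalidation: each cached ACL entry remembers the epoch it was filled in, and bumping the global epoch invalidates every entry at once without walking them. A small self-contained sketch of that invalidation scheme (generic C, not the Coda code):

#include <stdbool.h>
#include <stdio.h>

/* Global epoch: incrementing it implicitly invalidates every cached entry. */
static unsigned int permission_epoch;

struct cache_entry {
        unsigned int cached_epoch;
        int cached_perm;
};

static void cache_enter(struct cache_entry *e, int perm)
{
        e->cached_epoch = permission_epoch;
        e->cached_perm = perm;
}

static bool cache_check(const struct cache_entry *e, int mask)
{
        return (mask & e->cached_perm) == mask &&
               e->cached_epoch == permission_epoch;
}

static void cache_clear_all(void)
{
        permission_epoch++;     /* every entry's epoch is now stale */
}

int main(void)
{
        struct cache_entry e = { 0 };

        cache_enter(&e, 4);
        printf("hit before clear: %d\n", cache_check(&e, 4));
        cache_clear_all();
        printf("hit after  clear: %d\n", cache_check(&e, 4));
        return 0;
}
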
37406 diff -urNp linux-2.6.32.42/fs/compat_binfmt_elf.c linux-2.6.32.42/fs/compat_binfmt_elf.c
37407 --- linux-2.6.32.42/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
37408 +++ linux-2.6.32.42/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
37409 @@ -29,10 +29,12 @@
37410 #undef elfhdr
37411 #undef elf_phdr
37412 #undef elf_note
37413 +#undef elf_dyn
37414 #undef elf_addr_t
37415 #define elfhdr elf32_hdr
37416 #define elf_phdr elf32_phdr
37417 #define elf_note elf32_note
37418 +#define elf_dyn Elf32_Dyn
37419 #define elf_addr_t Elf32_Addr
37420
37421 /*
37422 diff -urNp linux-2.6.32.42/fs/compat.c linux-2.6.32.42/fs/compat.c
37423 --- linux-2.6.32.42/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
37424 +++ linux-2.6.32.42/fs/compat.c 2011-05-16 21:46:57.000000000 -0400
37425 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
37426
37427 struct compat_readdir_callback {
37428 struct compat_old_linux_dirent __user *dirent;
37429 + struct file * file;
37430 int result;
37431 };
37432
37433 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
37434 buf->result = -EOVERFLOW;
37435 return -EOVERFLOW;
37436 }
37437 +
37438 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37439 + return 0;
37440 +
37441 buf->result++;
37442 dirent = buf->dirent;
37443 if (!access_ok(VERIFY_WRITE, dirent,
37444 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
37445
37446 buf.result = 0;
37447 buf.dirent = dirent;
37448 + buf.file = file;
37449
37450 error = vfs_readdir(file, compat_fillonedir, &buf);
37451 if (buf.result)
37452 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
37453 struct compat_getdents_callback {
37454 struct compat_linux_dirent __user *current_dir;
37455 struct compat_linux_dirent __user *previous;
37456 + struct file * file;
37457 int count;
37458 int error;
37459 };
37460 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
37461 buf->error = -EOVERFLOW;
37462 return -EOVERFLOW;
37463 }
37464 +
37465 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37466 + return 0;
37467 +
37468 dirent = buf->previous;
37469 if (dirent) {
37470 if (__put_user(offset, &dirent->d_off))
37471 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
37472 buf.previous = NULL;
37473 buf.count = count;
37474 buf.error = 0;
37475 + buf.file = file;
37476
37477 error = vfs_readdir(file, compat_filldir, &buf);
37478 if (error >= 0)
37479 @@ -987,6 +999,7 @@ out:
37480 struct compat_getdents_callback64 {
37481 struct linux_dirent64 __user *current_dir;
37482 struct linux_dirent64 __user *previous;
37483 + struct file * file;
37484 int count;
37485 int error;
37486 };
37487 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
37488 buf->error = -EINVAL; /* only used if we fail.. */
37489 if (reclen > buf->count)
37490 return -EINVAL;
37491 +
37492 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37493 + return 0;
37494 +
37495 dirent = buf->previous;
37496
37497 if (dirent) {
37498 @@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
37499 buf.previous = NULL;
37500 buf.count = count;
37501 buf.error = 0;
37502 + buf.file = file;
37503
37504 error = vfs_readdir(file, compat_filldir64, &buf);
37505 if (error >= 0)
37506 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
37507 * verify all the pointers
37508 */
37509 ret = -EINVAL;
37510 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
37511 + if (nr_segs > UIO_MAXIOV)
37512 goto out;
37513 if (!file->f_op)
37514 goto out;
37515 @@ -1463,6 +1481,11 @@ int compat_do_execve(char * filename,
37516 compat_uptr_t __user *envp,
37517 struct pt_regs * regs)
37518 {
37519 +#ifdef CONFIG_GRKERNSEC
37520 + struct file *old_exec_file;
37521 + struct acl_subject_label *old_acl;
37522 + struct rlimit old_rlim[RLIM_NLIMITS];
37523 +#endif
37524 struct linux_binprm *bprm;
37525 struct file *file;
37526 struct files_struct *displaced;
37527 @@ -1499,6 +1522,19 @@ int compat_do_execve(char * filename,
37528 bprm->filename = filename;
37529 bprm->interp = filename;
37530
37531 + if (gr_process_user_ban()) {
37532 + retval = -EPERM;
37533 + goto out_file;
37534 + }
37535 +
37536 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
37537 + retval = -EAGAIN;
37538 + if (gr_handle_nproc())
37539 + goto out_file;
37540 + retval = -EACCES;
37541 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
37542 + goto out_file;
37543 +
37544 retval = bprm_mm_init(bprm);
37545 if (retval)
37546 goto out_file;
37547 @@ -1528,9 +1564,40 @@ int compat_do_execve(char * filename,
37548 if (retval < 0)
37549 goto out;
37550
37551 + if (!gr_tpe_allow(file)) {
37552 + retval = -EACCES;
37553 + goto out;
37554 + }
37555 +
37556 + if (gr_check_crash_exec(file)) {
37557 + retval = -EACCES;
37558 + goto out;
37559 + }
37560 +
37561 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
37562 +
37563 + gr_handle_exec_args_compat(bprm, argv);
37564 +
37565 +#ifdef CONFIG_GRKERNSEC
37566 + old_acl = current->acl;
37567 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
37568 + old_exec_file = current->exec_file;
37569 + get_file(file);
37570 + current->exec_file = file;
37571 +#endif
37572 +
37573 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
37574 + bprm->unsafe & LSM_UNSAFE_SHARE);
37575 + if (retval < 0)
37576 + goto out_fail;
37577 +
37578 retval = search_binary_handler(bprm, regs);
37579 if (retval < 0)
37580 - goto out;
37581 + goto out_fail;
37582 +#ifdef CONFIG_GRKERNSEC
37583 + if (old_exec_file)
37584 + fput(old_exec_file);
37585 +#endif
37586
37587 /* execve succeeded */
37588 current->fs->in_exec = 0;
37589 @@ -1541,6 +1608,14 @@ int compat_do_execve(char * filename,
37590 put_files_struct(displaced);
37591 return retval;
37592
37593 +out_fail:
37594 +#ifdef CONFIG_GRKERNSEC
37595 + current->acl = old_acl;
37596 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
37597 + fput(current->exec_file);
37598 + current->exec_file = old_exec_file;
37599 +#endif
37600 +
37601 out:
37602 if (bprm->mm) {
37603 acct_arg_size(bprm, 0);
37604 @@ -1711,6 +1786,8 @@ int compat_core_sys_select(int n, compat
37605 struct fdtable *fdt;
37606 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
37607
37608 + pax_track_stack();
37609 +
37610 if (n < 0)
37611 goto out_nofds;
37612
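
Earlier in the fs/compat.c diff above, each readdir callback structure (compat_readdir_callback, compat_getdents_callback, compat_getdents_callback64) gains a struct file * member that the caller fills in before vfs_readdir(), so the grsecurity filldir hook can consult the opened directory when deciding whether to show an entry. A generic sketch of threading extra context through a callback's opaque buffer (plain C, not the VFS API; the names here are illustrative only):

#include <stdio.h>

/* The "callback buffer" carries both the output state and the extra
 * context (here, a tag standing in for the struct file pointer). */
struct filldir_ctx {
        const char *source_tag;  /* stands in for buf->file */
        int shown;
};

typedef int (*filldir_t)(void *buf, const char *name);

static int filldir(void *buf, const char *name)
{
        struct filldir_ctx *ctx = buf;

        /* A filter can now use the context, the way the patch's hook uses
         * buf->file, before accepting the entry. */
        if (name[0] == '.')
                return 0;
        printf("[%s] %s\n", ctx->source_tag, name);
        ctx->shown++;
        return 0;
}

static void readdir_sim(filldir_t cb, void *buf)
{
        static const char *names[] = { ".", "..", "a.txt", "b.txt" };
        for (unsigned i = 0; i < sizeof(names) / sizeof(names[0]); i++)
                cb(buf, names[i]);
}

int main(void)
{
        struct filldir_ctx ctx = { .source_tag = "dirfd-42", .shown = 0 };
        readdir_sim(filldir, &ctx);
        printf("%d entries shown\n", ctx.shown);
        return 0;
}
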
37613 diff -urNp linux-2.6.32.42/fs/compat_ioctl.c linux-2.6.32.42/fs/compat_ioctl.c
37614 --- linux-2.6.32.42/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
37615 +++ linux-2.6.32.42/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
37616 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
37617 up = (struct compat_video_spu_palette __user *) arg;
37618 err = get_user(palp, &up->palette);
37619 err |= get_user(length, &up->length);
37620 + if (err)
37621 + return -EFAULT;
37622
37623 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
37624 err = put_user(compat_ptr(palp), &up_native->palette);
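
The compat_ioctl.c hunk above makes do_video_set_spu_palette() bail out with -EFAULT when either get_user() failed, instead of going on to build the native structure from possibly-uninitialized values. A tiny sketch of OR-ing copy-in results together and checking them before the values are used (ordinary functions here, not the kernel's get_user/put_user):

#include <stdio.h>

/* Stand-ins for copy-in helpers: return 0 on success, nonzero on fault. */
static int fake_get_user(int *dst, const int *src)
{
        if (!src)
                return -1;      /* simulated fault */
        *dst = *src;
        return 0;
}

static int set_palette(const int *user_palp, const int *user_length)
{
        int palp = 0, length = 0, err;

        err  = fake_get_user(&palp, user_palp);
        err |= fake_get_user(&length, user_length);
        if (err)
                return -14;     /* -EFAULT: stop before using palp/length */

        printf("palette %d, length %d\n", palp, length);
        return 0;
}

int main(void)
{
        int p = 7, l = 3;

        printf("good copy-in -> %d\n", set_palette(&p, &l));
        printf("bad  copy-in -> %d\n", set_palette(&p, NULL));
        return 0;
}
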
37625 diff -urNp linux-2.6.32.42/fs/configfs/dir.c linux-2.6.32.42/fs/configfs/dir.c
37626 --- linux-2.6.32.42/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
37627 +++ linux-2.6.32.42/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
37628 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
37629 }
37630 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
37631 struct configfs_dirent *next;
37632 - const char * name;
37633 + const unsigned char * name;
37634 + char d_name[sizeof(next->s_dentry->d_iname)];
37635 int len;
37636
37637 next = list_entry(p, struct configfs_dirent,
37638 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
37639 continue;
37640
37641 name = configfs_get_name(next);
37642 - len = strlen(name);
37643 + if (next->s_dentry && name == next->s_dentry->d_iname) {
37644 + len = next->s_dentry->d_name.len;
37645 + memcpy(d_name, name, len);
37646 + name = d_name;
37647 + } else
37648 + len = strlen(name);
37649 if (next->s_dentry)
37650 ino = next->s_dentry->d_inode->i_ino;
37651 else
37652 diff -urNp linux-2.6.32.42/fs/dcache.c linux-2.6.32.42/fs/dcache.c
37653 --- linux-2.6.32.42/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
37654 +++ linux-2.6.32.42/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
37655 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
37656
37657 static struct kmem_cache *dentry_cache __read_mostly;
37658
37659 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
37660 -
37661 /*
37662 * This is the single most critical data structure when it comes
37663 * to the dcache: the hashtable for lookups. Somebody should try
37664 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
37665 mempages -= reserve;
37666
37667 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
37668 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
37669 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
37670
37671 dcache_init();
37672 inode_init();
37673 diff -urNp linux-2.6.32.42/fs/dlm/lockspace.c linux-2.6.32.42/fs/dlm/lockspace.c
37674 --- linux-2.6.32.42/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
37675 +++ linux-2.6.32.42/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
37676 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
37677 kfree(ls);
37678 }
37679
37680 -static struct sysfs_ops dlm_attr_ops = {
37681 +static const struct sysfs_ops dlm_attr_ops = {
37682 .show = dlm_attr_show,
37683 .store = dlm_attr_store,
37684 };
37685 diff -urNp linux-2.6.32.42/fs/ecryptfs/inode.c linux-2.6.32.42/fs/ecryptfs/inode.c
37686 --- linux-2.6.32.42/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37687 +++ linux-2.6.32.42/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
37688 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
37689 old_fs = get_fs();
37690 set_fs(get_ds());
37691 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
37692 - (char __user *)lower_buf,
37693 + (__force char __user *)lower_buf,
37694 lower_bufsiz);
37695 set_fs(old_fs);
37696 if (rc < 0)
37697 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
37698 }
37699 old_fs = get_fs();
37700 set_fs(get_ds());
37701 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
37702 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
37703 set_fs(old_fs);
37704 if (rc < 0)
37705 goto out_free;
37706 diff -urNp linux-2.6.32.42/fs/exec.c linux-2.6.32.42/fs/exec.c
37707 --- linux-2.6.32.42/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
37708 +++ linux-2.6.32.42/fs/exec.c 2011-06-25 12:56:37.000000000 -0400
37709 @@ -56,12 +56,24 @@
37710 #include <linux/fsnotify.h>
37711 #include <linux/fs_struct.h>
37712 #include <linux/pipe_fs_i.h>
37713 +#include <linux/random.h>
37714 +#include <linux/seq_file.h>
37715 +
37716 +#ifdef CONFIG_PAX_REFCOUNT
37717 +#include <linux/kallsyms.h>
37718 +#include <linux/kdebug.h>
37719 +#endif
37720
37721 #include <asm/uaccess.h>
37722 #include <asm/mmu_context.h>
37723 #include <asm/tlb.h>
37724 #include "internal.h"
37725
37726 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
37727 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
37728 +EXPORT_SYMBOL(pax_set_initial_flags_func);
37729 +#endif
37730 +
37731 int core_uses_pid;
37732 char core_pattern[CORENAME_MAX_SIZE] = "core";
37733 unsigned int core_pipe_limit;
37734 @@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
37735 goto out;
37736
37737 file = do_filp_open(AT_FDCWD, tmp,
37738 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
37739 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
37740 MAY_READ | MAY_EXEC | MAY_OPEN);
37741 putname(tmp);
37742 error = PTR_ERR(file);
37743 @@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
37744 int write)
37745 {
37746 struct page *page;
37747 - int ret;
37748
37749 -#ifdef CONFIG_STACK_GROWSUP
37750 - if (write) {
37751 - ret = expand_stack_downwards(bprm->vma, pos);
37752 - if (ret < 0)
37753 - return NULL;
37754 - }
37755 -#endif
37756 - ret = get_user_pages(current, bprm->mm, pos,
37757 - 1, write, 1, &page, NULL);
37758 - if (ret <= 0)
37759 + if (0 > expand_stack_downwards(bprm->vma, pos))
37760 + return NULL;
37761 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
37762 return NULL;
37763
37764 if (write) {
37765 @@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
37766 vma->vm_end = STACK_TOP_MAX;
37767 vma->vm_start = vma->vm_end - PAGE_SIZE;
37768 vma->vm_flags = VM_STACK_FLAGS;
37769 +
37770 +#ifdef CONFIG_PAX_SEGMEXEC
37771 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
37772 +#endif
37773 +
37774 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
37775
37776 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
37777 @@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
37778 mm->stack_vm = mm->total_vm = 1;
37779 up_write(&mm->mmap_sem);
37780 bprm->p = vma->vm_end - sizeof(void *);
37781 +
37782 +#ifdef CONFIG_PAX_RANDUSTACK
37783 + if (randomize_va_space)
37784 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
37785 +#endif
37786 +
37787 return 0;
37788 err:
37789 up_write(&mm->mmap_sem);
37790 @@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
37791 int r;
37792 mm_segment_t oldfs = get_fs();
37793 set_fs(KERNEL_DS);
37794 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
37795 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
37796 set_fs(oldfs);
37797 return r;
37798 }
37799 @@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
37800 unsigned long new_end = old_end - shift;
37801 struct mmu_gather *tlb;
37802
37803 - BUG_ON(new_start > new_end);
37804 + if (new_start >= new_end || new_start < mmap_min_addr)
37805 + return -ENOMEM;
37806
37807 /*
37808 * ensure there are no vmas between where we want to go
37809 @@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
37810 if (vma != find_vma(mm, new_start))
37811 return -EFAULT;
37812
37813 +#ifdef CONFIG_PAX_SEGMEXEC
37814 + BUG_ON(pax_find_mirror_vma(vma));
37815 +#endif
37816 +
37817 /*
37818 * cover the whole range: [new_start, old_end)
37819 */
37820 @@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
37821 stack_top = arch_align_stack(stack_top);
37822 stack_top = PAGE_ALIGN(stack_top);
37823
37824 - if (unlikely(stack_top < mmap_min_addr) ||
37825 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
37826 - return -ENOMEM;
37827 -
37828 stack_shift = vma->vm_end - stack_top;
37829
37830 bprm->p -= stack_shift;
37831 @@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
37832 bprm->exec -= stack_shift;
37833
37834 down_write(&mm->mmap_sem);
37835 +
37836 + /* Move stack pages down in memory. */
37837 + if (stack_shift) {
37838 + ret = shift_arg_pages(vma, stack_shift);
37839 + if (ret)
37840 + goto out_unlock;
37841 + }
37842 +
37843 vm_flags = VM_STACK_FLAGS;
37844
37845 /*
37846 @@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
37847 vm_flags &= ~VM_EXEC;
37848 vm_flags |= mm->def_flags;
37849
37850 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37851 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37852 + vm_flags &= ~VM_EXEC;
37853 +
37854 +#ifdef CONFIG_PAX_MPROTECT
37855 + if (mm->pax_flags & MF_PAX_MPROTECT)
37856 + vm_flags &= ~VM_MAYEXEC;
37857 +#endif
37858 +
37859 + }
37860 +#endif
37861 +
37862 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
37863 vm_flags);
37864 if (ret)
37865 goto out_unlock;
37866 BUG_ON(prev != vma);
37867
37868 - /* Move stack pages down in memory. */
37869 - if (stack_shift) {
37870 - ret = shift_arg_pages(vma, stack_shift);
37871 - if (ret)
37872 - goto out_unlock;
37873 - }
37874 -
37875 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
37876 stack_size = vma->vm_end - vma->vm_start;
37877 /*
37878 @@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
37879 int err;
37880
37881 file = do_filp_open(AT_FDCWD, name,
37882 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
37883 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
37884 MAY_EXEC | MAY_OPEN);
37885 if (IS_ERR(file))
37886 goto out;
37887 @@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
37888 old_fs = get_fs();
37889 set_fs(get_ds());
37890 /* The cast to a user pointer is valid due to the set_fs() */
37891 - result = vfs_read(file, (void __user *)addr, count, &pos);
37892 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
37893 set_fs(old_fs);
37894 return result;
37895 }
37896 @@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
37897 }
37898 rcu_read_unlock();
37899
37900 - if (p->fs->users > n_fs) {
37901 + if (atomic_read(&p->fs->users) > n_fs) {
37902 bprm->unsafe |= LSM_UNSAFE_SHARE;
37903 } else {
37904 res = -EAGAIN;
37905 @@ -1347,6 +1376,11 @@ int do_execve(char * filename,
37906 char __user *__user *envp,
37907 struct pt_regs * regs)
37908 {
37909 +#ifdef CONFIG_GRKERNSEC
37910 + struct file *old_exec_file;
37911 + struct acl_subject_label *old_acl;
37912 + struct rlimit old_rlim[RLIM_NLIMITS];
37913 +#endif
37914 struct linux_binprm *bprm;
37915 struct file *file;
37916 struct files_struct *displaced;
37917 @@ -1383,6 +1417,23 @@ int do_execve(char * filename,
37918 bprm->filename = filename;
37919 bprm->interp = filename;
37920
37921 + if (gr_process_user_ban()) {
37922 + retval = -EPERM;
37923 + goto out_file;
37924 + }
37925 +
37926 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
37927 +
37928 + if (gr_handle_nproc()) {
37929 + retval = -EAGAIN;
37930 + goto out_file;
37931 + }
37932 +
37933 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
37934 + retval = -EACCES;
37935 + goto out_file;
37936 + }
37937 +
37938 retval = bprm_mm_init(bprm);
37939 if (retval)
37940 goto out_file;
37941 @@ -1412,10 +1463,41 @@ int do_execve(char * filename,
37942 if (retval < 0)
37943 goto out;
37944
37945 + if (!gr_tpe_allow(file)) {
37946 + retval = -EACCES;
37947 + goto out;
37948 + }
37949 +
37950 + if (gr_check_crash_exec(file)) {
37951 + retval = -EACCES;
37952 + goto out;
37953 + }
37954 +
37955 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
37956 +
37957 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
37958 +
37959 +#ifdef CONFIG_GRKERNSEC
37960 + old_acl = current->acl;
37961 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
37962 + old_exec_file = current->exec_file;
37963 + get_file(file);
37964 + current->exec_file = file;
37965 +#endif
37966 +
37967 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
37968 + bprm->unsafe & LSM_UNSAFE_SHARE);
37969 + if (retval < 0)
37970 + goto out_fail;
37971 +
37972 current->flags &= ~PF_KTHREAD;
37973 retval = search_binary_handler(bprm,regs);
37974 if (retval < 0)
37975 - goto out;
37976 + goto out_fail;
37977 +#ifdef CONFIG_GRKERNSEC
37978 + if (old_exec_file)
37979 + fput(old_exec_file);
37980 +#endif
37981
37982 /* execve succeeded */
37983 current->fs->in_exec = 0;
37984 @@ -1426,6 +1508,14 @@ int do_execve(char * filename,
37985 put_files_struct(displaced);
37986 return retval;
37987
37988 +out_fail:
37989 +#ifdef CONFIG_GRKERNSEC
37990 + current->acl = old_acl;
37991 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
37992 + fput(current->exec_file);
37993 + current->exec_file = old_exec_file;
37994 +#endif
37995 +
37996 out:
37997 if (bprm->mm) {
37998 acct_arg_size(bprm, 0);
37999 @@ -1591,6 +1681,220 @@ out:
38000 return ispipe;
38001 }
38002
38003 +int pax_check_flags(unsigned long *flags)
38004 +{
38005 + int retval = 0;
38006 +
38007 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
38008 + if (*flags & MF_PAX_SEGMEXEC)
38009 + {
38010 + *flags &= ~MF_PAX_SEGMEXEC;
38011 + retval = -EINVAL;
38012 + }
38013 +#endif
38014 +
38015 + if ((*flags & MF_PAX_PAGEEXEC)
38016 +
38017 +#ifdef CONFIG_PAX_PAGEEXEC
38018 + && (*flags & MF_PAX_SEGMEXEC)
38019 +#endif
38020 +
38021 + )
38022 + {
38023 + *flags &= ~MF_PAX_PAGEEXEC;
38024 + retval = -EINVAL;
38025 + }
38026 +
38027 + if ((*flags & MF_PAX_MPROTECT)
38028 +
38029 +#ifdef CONFIG_PAX_MPROTECT
38030 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38031 +#endif
38032 +
38033 + )
38034 + {
38035 + *flags &= ~MF_PAX_MPROTECT;
38036 + retval = -EINVAL;
38037 + }
38038 +
38039 + if ((*flags & MF_PAX_EMUTRAMP)
38040 +
38041 +#ifdef CONFIG_PAX_EMUTRAMP
38042 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38043 +#endif
38044 +
38045 + )
38046 + {
38047 + *flags &= ~MF_PAX_EMUTRAMP;
38048 + retval = -EINVAL;
38049 + }
38050 +
38051 + return retval;
38052 +}
38053 +
38054 +EXPORT_SYMBOL(pax_check_flags);
38055 +
38056 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38057 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
38058 +{
38059 + struct task_struct *tsk = current;
38060 + struct mm_struct *mm = current->mm;
38061 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
38062 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
38063 + char *path_exec = NULL;
38064 + char *path_fault = NULL;
38065 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
38066 +
38067 + if (buffer_exec && buffer_fault) {
38068 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
38069 +
38070 + down_read(&mm->mmap_sem);
38071 + vma = mm->mmap;
38072 + while (vma && (!vma_exec || !vma_fault)) {
38073 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
38074 + vma_exec = vma;
38075 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
38076 + vma_fault = vma;
38077 + vma = vma->vm_next;
38078 + }
38079 + if (vma_exec) {
38080 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
38081 + if (IS_ERR(path_exec))
38082 + path_exec = "<path too long>";
38083 + else {
38084 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
38085 + if (path_exec) {
38086 + *path_exec = 0;
38087 + path_exec = buffer_exec;
38088 + } else
38089 + path_exec = "<path too long>";
38090 + }
38091 + }
38092 + if (vma_fault) {
38093 + start = vma_fault->vm_start;
38094 + end = vma_fault->vm_end;
38095 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
38096 + if (vma_fault->vm_file) {
38097 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
38098 + if (IS_ERR(path_fault))
38099 + path_fault = "<path too long>";
38100 + else {
38101 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
38102 + if (path_fault) {
38103 + *path_fault = 0;
38104 + path_fault = buffer_fault;
38105 + } else
38106 + path_fault = "<path too long>";
38107 + }
38108 + } else
38109 + path_fault = "<anonymous mapping>";
38110 + }
38111 + up_read(&mm->mmap_sem);
38112 + }
38113 + if (tsk->signal->curr_ip)
38114 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
38115 + else
38116 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
38117 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
38118 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
38119 + task_uid(tsk), task_euid(tsk), pc, sp);
38120 + free_page((unsigned long)buffer_exec);
38121 + free_page((unsigned long)buffer_fault);
38122 + pax_report_insns(pc, sp);
38123 + do_coredump(SIGKILL, SIGKILL, regs);
38124 +}
38125 +#endif
38126 +
38127 +#ifdef CONFIG_PAX_REFCOUNT
38128 +void pax_report_refcount_overflow(struct pt_regs *regs)
38129 +{
38130 + if (current->signal->curr_ip)
38131 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38132 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
38133 + else
38134 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38135 + current->comm, task_pid_nr(current), current_uid(), current_euid());
38136 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
38137 + show_regs(regs);
38138 + force_sig_specific(SIGKILL, current);
38139 +}
38140 +#endif
38141 +
38142 +#ifdef CONFIG_PAX_USERCOPY
38143 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
38144 +int object_is_on_stack(const void *obj, unsigned long len)
38145 +{
38146 + const void * const stack = task_stack_page(current);
38147 + const void * const stackend = stack + THREAD_SIZE;
38148 +
38149 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38150 + const void *frame = NULL;
38151 + const void *oldframe;
38152 +#endif
38153 +
38154 + if (obj + len < obj)
38155 + return -1;
38156 +
38157 + if (obj + len <= stack || stackend <= obj)
38158 + return 0;
38159 +
38160 + if (obj < stack || stackend < obj + len)
38161 + return -1;
38162 +
38163 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38164 + oldframe = __builtin_frame_address(1);
38165 + if (oldframe)
38166 + frame = __builtin_frame_address(2);
38167 + /*
38168 + low ----------------------------------------------> high
38169 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
38170 + ^----------------^
38171 + allow copies only within here
38172 + */
38173 + while (stack <= frame && frame < stackend) {
38174 + /* if obj + len extends past the last frame, this
38175 + check won't pass and the next frame will be 0,
38176 + causing us to bail out and correctly report
38177 + the copy as invalid
38178 + */
38179 + if (obj + len <= frame)
38180 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
38181 + oldframe = frame;
38182 + frame = *(const void * const *)frame;
38183 + }
38184 + return -1;
38185 +#else
38186 + return 1;
38187 +#endif
38188 +}
38189 +
38190 +
38191 +void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
38192 +{
38193 + if (current->signal->curr_ip)
38194 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38195 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38196 + else
38197 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38198 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38199 +
38200 + dump_stack();
38201 + gr_handle_kernel_exploit();
38202 + do_group_exit(SIGKILL);
38203 +}
38204 +#endif
38205 +
38206 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
38207 +void pax_track_stack(void)
38208 +{
38209 + unsigned long sp = (unsigned long)&sp;
38210 + if (sp < current_thread_info()->lowest_stack &&
38211 + sp > (unsigned long)task_stack_page(current))
38212 + current_thread_info()->lowest_stack = sp;
38213 +}
38214 +EXPORT_SYMBOL(pax_track_stack);
38215 +#endif
38216 +
38217 static int zap_process(struct task_struct *start)
38218 {
38219 struct task_struct *t;
38220 @@ -1793,17 +2097,17 @@ static void wait_for_dump_helpers(struct
38221 pipe = file->f_path.dentry->d_inode->i_pipe;
38222
38223 pipe_lock(pipe);
38224 - pipe->readers++;
38225 - pipe->writers--;
38226 + atomic_inc(&pipe->readers);
38227 + atomic_dec(&pipe->writers);
38228
38229 - while ((pipe->readers > 1) && (!signal_pending(current))) {
38230 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
38231 wake_up_interruptible_sync(&pipe->wait);
38232 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
38233 pipe_wait(pipe);
38234 }
38235
38236 - pipe->readers--;
38237 - pipe->writers++;
38238 + atomic_dec(&pipe->readers);
38239 + atomic_inc(&pipe->writers);
38240 pipe_unlock(pipe);
38241
38242 }
38243 @@ -1826,10 +2130,13 @@ void do_coredump(long signr, int exit_co
38244 char **helper_argv = NULL;
38245 int helper_argc = 0;
38246 int dump_count = 0;
38247 - static atomic_t core_dump_count = ATOMIC_INIT(0);
38248 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
38249
38250 audit_core_dumps(signr);
38251
38252 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
38253 + gr_handle_brute_attach(current, mm->flags);
38254 +
38255 binfmt = mm->binfmt;
38256 if (!binfmt || !binfmt->core_dump)
38257 goto fail;
38258 @@ -1874,6 +2181,8 @@ void do_coredump(long signr, int exit_co
38259 */
38260 clear_thread_flag(TIF_SIGPENDING);
38261
38262 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
38263 +
38264 /*
38265 * lock_kernel() because format_corename() is controlled by sysctl, which
38266 * uses lock_kernel()
38267 @@ -1908,7 +2217,7 @@ void do_coredump(long signr, int exit_co
38268 goto fail_unlock;
38269 }
38270
38271 - dump_count = atomic_inc_return(&core_dump_count);
38272 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
38273 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
38274 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
38275 task_tgid_vnr(current), current->comm);
38276 @@ -1972,7 +2281,7 @@ close_fail:
38277 filp_close(file, NULL);
38278 fail_dropcount:
38279 if (dump_count)
38280 - atomic_dec(&core_dump_count);
38281 + atomic_dec_unchecked(&core_dump_count);
38282 fail_unlock:
38283 if (helper_argv)
38284 argv_free(helper_argv);
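
Among the fs/exec.c additions above, object_is_on_stack() classifies a buffer as entirely off the current task's stack, entirely on it, or straddling a boundary, and CONFIG_PAX_USERCOPY uses that to reject suspicious copies. A rough userspace analogue of the bounds classification (glibc's pthread_getattr_np()/pthread_attr_getstack() rather than the kernel's task_stack_page()/THREAD_SIZE; the frame-pointer-walking refinement is omitted):

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

/* 0: not on the stack at all, 1: fully on it, -1: partially (suspicious). */
static int on_current_stack(const void *obj, size_t len)
{
        pthread_attr_t attr;
        void *stack_lo;
        size_t stack_size;

        if (pthread_getattr_np(pthread_self(), &attr) != 0)
                return -1;
        pthread_attr_getstack(&attr, &stack_lo, &stack_size);
        pthread_attr_destroy(&attr);

        const char *lo = stack_lo, *hi = lo + stack_size;
        const char *p = obj, *q = p + len;

        if (q < p)                      /* length wraps the address space */
                return -1;
        if (q <= lo || hi <= p)         /* completely outside the stack */
                return 0;
        if (p < lo || hi < q)           /* straddles a stack boundary */
                return -1;
        return 1;                       /* completely inside the stack */
}

int main(void)
{
        char local[64];
        static char global[64];

        printf("local  buffer: %d\n", on_current_stack(local, sizeof(local)));
        printf("global buffer: %d\n", on_current_stack(global, sizeof(global)));
        return 0;
}
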
38285 diff -urNp linux-2.6.32.42/fs/ext2/balloc.c linux-2.6.32.42/fs/ext2/balloc.c
38286 --- linux-2.6.32.42/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
38287 +++ linux-2.6.32.42/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
38288 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
38289
38290 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38291 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38292 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38293 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38294 sbi->s_resuid != current_fsuid() &&
38295 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38296 return 0;
38297 diff -urNp linux-2.6.32.42/fs/ext3/balloc.c linux-2.6.32.42/fs/ext3/balloc.c
38298 --- linux-2.6.32.42/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
38299 +++ linux-2.6.32.42/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
38300 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
38301
38302 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38303 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38304 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38305 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38306 sbi->s_resuid != current_fsuid() &&
38307 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38308 return 0;
38309 diff -urNp linux-2.6.32.42/fs/ext4/balloc.c linux-2.6.32.42/fs/ext4/balloc.c
38310 --- linux-2.6.32.42/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
38311 +++ linux-2.6.32.42/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
38312 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
38313 /* Hm, nope. Are (enough) root reserved blocks available? */
38314 if (sbi->s_resuid == current_fsuid() ||
38315 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
38316 - capable(CAP_SYS_RESOURCE)) {
38317 + capable_nolog(CAP_SYS_RESOURCE)) {
38318 if (free_blocks >= (nblocks + dirty_blocks))
38319 return 1;
38320 }
38321 diff -urNp linux-2.6.32.42/fs/ext4/ext4.h linux-2.6.32.42/fs/ext4/ext4.h
38322 --- linux-2.6.32.42/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
38323 +++ linux-2.6.32.42/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
38324 @@ -1078,19 +1078,19 @@ struct ext4_sb_info {
38325
38326 /* stats for buddy allocator */
38327 spinlock_t s_mb_pa_lock;
38328 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
38329 - atomic_t s_bal_success; /* we found long enough chunks */
38330 - atomic_t s_bal_allocated; /* in blocks */
38331 - atomic_t s_bal_ex_scanned; /* total extents scanned */
38332 - atomic_t s_bal_goals; /* goal hits */
38333 - atomic_t s_bal_breaks; /* too long searches */
38334 - atomic_t s_bal_2orders; /* 2^order hits */
38335 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
38336 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
38337 + atomic_unchecked_t s_bal_allocated; /* in blocks */
38338 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
38339 + atomic_unchecked_t s_bal_goals; /* goal hits */
38340 + atomic_unchecked_t s_bal_breaks; /* too long searches */
38341 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
38342 spinlock_t s_bal_lock;
38343 unsigned long s_mb_buddies_generated;
38344 unsigned long long s_mb_generation_time;
38345 - atomic_t s_mb_lost_chunks;
38346 - atomic_t s_mb_preallocated;
38347 - atomic_t s_mb_discarded;
38348 + atomic_unchecked_t s_mb_lost_chunks;
38349 + atomic_unchecked_t s_mb_preallocated;
38350 + atomic_unchecked_t s_mb_discarded;
38351 atomic_t s_lock_busy;
38352
38353 /* locality groups */
38354 diff -urNp linux-2.6.32.42/fs/ext4/mballoc.c linux-2.6.32.42/fs/ext4/mballoc.c
38355 --- linux-2.6.32.42/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
38356 +++ linux-2.6.32.42/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
38357 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
38358 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
38359
38360 if (EXT4_SB(sb)->s_mb_stats)
38361 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
38362 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
38363
38364 break;
38365 }
38366 @@ -2131,7 +2131,7 @@ repeat:
38367 ac->ac_status = AC_STATUS_CONTINUE;
38368 ac->ac_flags |= EXT4_MB_HINT_FIRST;
38369 cr = 3;
38370 - atomic_inc(&sbi->s_mb_lost_chunks);
38371 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
38372 goto repeat;
38373 }
38374 }
38375 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
38376 ext4_grpblk_t counters[16];
38377 } sg;
38378
38379 + pax_track_stack();
38380 +
38381 group--;
38382 if (group == 0)
38383 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
38384 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
38385 if (sbi->s_mb_stats) {
38386 printk(KERN_INFO
38387 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
38388 - atomic_read(&sbi->s_bal_allocated),
38389 - atomic_read(&sbi->s_bal_reqs),
38390 - atomic_read(&sbi->s_bal_success));
38391 + atomic_read_unchecked(&sbi->s_bal_allocated),
38392 + atomic_read_unchecked(&sbi->s_bal_reqs),
38393 + atomic_read_unchecked(&sbi->s_bal_success));
38394 printk(KERN_INFO
38395 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
38396 "%u 2^N hits, %u breaks, %u lost\n",
38397 - atomic_read(&sbi->s_bal_ex_scanned),
38398 - atomic_read(&sbi->s_bal_goals),
38399 - atomic_read(&sbi->s_bal_2orders),
38400 - atomic_read(&sbi->s_bal_breaks),
38401 - atomic_read(&sbi->s_mb_lost_chunks));
38402 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
38403 + atomic_read_unchecked(&sbi->s_bal_goals),
38404 + atomic_read_unchecked(&sbi->s_bal_2orders),
38405 + atomic_read_unchecked(&sbi->s_bal_breaks),
38406 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
38407 printk(KERN_INFO
38408 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
38409 sbi->s_mb_buddies_generated++,
38410 sbi->s_mb_generation_time);
38411 printk(KERN_INFO
38412 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
38413 - atomic_read(&sbi->s_mb_preallocated),
38414 - atomic_read(&sbi->s_mb_discarded));
38415 + atomic_read_unchecked(&sbi->s_mb_preallocated),
38416 + atomic_read_unchecked(&sbi->s_mb_discarded));
38417 }
38418
38419 free_percpu(sbi->s_locality_groups);
38420 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
38421 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
38422
38423 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
38424 - atomic_inc(&sbi->s_bal_reqs);
38425 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
38426 + atomic_inc_unchecked(&sbi->s_bal_reqs);
38427 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
38428 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
38429 - atomic_inc(&sbi->s_bal_success);
38430 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
38431 + atomic_inc_unchecked(&sbi->s_bal_success);
38432 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
38433 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
38434 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
38435 - atomic_inc(&sbi->s_bal_goals);
38436 + atomic_inc_unchecked(&sbi->s_bal_goals);
38437 if (ac->ac_found > sbi->s_mb_max_to_scan)
38438 - atomic_inc(&sbi->s_bal_breaks);
38439 + atomic_inc_unchecked(&sbi->s_bal_breaks);
38440 }
38441
38442 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
38443 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
38444 trace_ext4_mb_new_inode_pa(ac, pa);
38445
38446 ext4_mb_use_inode_pa(ac, pa);
38447 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38448 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38449
38450 ei = EXT4_I(ac->ac_inode);
38451 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
38452 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
38453 trace_ext4_mb_new_group_pa(ac, pa);
38454
38455 ext4_mb_use_group_pa(ac, pa);
38456 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38457 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38458
38459 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
38460 lg = ac->ac_lg;
38461 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
38462 * from the bitmap and continue.
38463 */
38464 }
38465 - atomic_add(free, &sbi->s_mb_discarded);
38466 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
38467
38468 return err;
38469 }
38470 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
38471 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
38472 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
38473 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
38474 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
38475 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
38476
38477 if (ac) {
38478 ac->ac_sb = sb;
38479 diff -urNp linux-2.6.32.42/fs/ext4/super.c linux-2.6.32.42/fs/ext4/super.c
38480 --- linux-2.6.32.42/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
38481 +++ linux-2.6.32.42/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
38482 @@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
38483 }
38484
38485
38486 -static struct sysfs_ops ext4_attr_ops = {
38487 +static const struct sysfs_ops ext4_attr_ops = {
38488 .show = ext4_attr_show,
38489 .store = ext4_attr_store,
38490 };
38491 diff -urNp linux-2.6.32.42/fs/fcntl.c linux-2.6.32.42/fs/fcntl.c
38492 --- linux-2.6.32.42/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
38493 +++ linux-2.6.32.42/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
38494 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
38495 if (err)
38496 return err;
38497
38498 + if (gr_handle_chroot_fowner(pid, type))
38499 + return -ENOENT;
38500 + if (gr_check_protected_task_fowner(pid, type))
38501 + return -EACCES;
38502 +
38503 f_modown(filp, pid, type, force);
38504 return 0;
38505 }
38506 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
38507 switch (cmd) {
38508 case F_DUPFD:
38509 case F_DUPFD_CLOEXEC:
38510 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
38511 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
38512 break;
38513 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
38514 diff -urNp linux-2.6.32.42/fs/fifo.c linux-2.6.32.42/fs/fifo.c
38515 --- linux-2.6.32.42/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
38516 +++ linux-2.6.32.42/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
38517 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
38518 */
38519 filp->f_op = &read_pipefifo_fops;
38520 pipe->r_counter++;
38521 - if (pipe->readers++ == 0)
38522 + if (atomic_inc_return(&pipe->readers) == 1)
38523 wake_up_partner(inode);
38524
38525 - if (!pipe->writers) {
38526 + if (!atomic_read(&pipe->writers)) {
38527 if ((filp->f_flags & O_NONBLOCK)) {
38528 /* suppress POLLHUP until we have
38529 * seen a writer */
38530 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
38531 * errno=ENXIO when there is no process reading the FIFO.
38532 */
38533 ret = -ENXIO;
38534 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
38535 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
38536 goto err;
38537
38538 filp->f_op = &write_pipefifo_fops;
38539 pipe->w_counter++;
38540 - if (!pipe->writers++)
38541 + if (atomic_inc_return(&pipe->writers) == 1)
38542 wake_up_partner(inode);
38543
38544 - if (!pipe->readers) {
38545 + if (!atomic_read(&pipe->readers)) {
38546 wait_for_partner(inode, &pipe->r_counter);
38547 if (signal_pending(current))
38548 goto err_wr;
38549 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
38550 */
38551 filp->f_op = &rdwr_pipefifo_fops;
38552
38553 - pipe->readers++;
38554 - pipe->writers++;
38555 + atomic_inc(&pipe->readers);
38556 + atomic_inc(&pipe->writers);
38557 pipe->r_counter++;
38558 pipe->w_counter++;
38559 - if (pipe->readers == 1 || pipe->writers == 1)
38560 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
38561 wake_up_partner(inode);
38562 break;
38563
38564 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
38565 return 0;
38566
38567 err_rd:
38568 - if (!--pipe->readers)
38569 + if (atomic_dec_and_test(&pipe->readers))
38570 wake_up_interruptible(&pipe->wait);
38571 ret = -ERESTARTSYS;
38572 goto err;
38573
38574 err_wr:
38575 - if (!--pipe->writers)
38576 + if (atomic_dec_and_test(&pipe->writers))
38577 wake_up_interruptible(&pipe->wait);
38578 ret = -ERESTARTSYS;
38579 goto err;
38580
38581 err:
38582 - if (!pipe->readers && !pipe->writers)
38583 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
38584 free_pipe_info(inode);
38585
38586 err_nocleanup:
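
The fifo.c hunks above convert the pipe reader/writer counts to atomics, and each rewritten condition is the atomic idiom for the original integer test: "readers++ == 0" becomes "atomic_inc_return(&readers) == 1" (the new value is 1 exactly when the old value was 0), and "!--writers" becomes "atomic_dec_and_test(&writers)" (true exactly when the decrement reaches zero). The snippet below is an editor's user-space sketch of those two idioms with C11 atomics under stand-in names, not part of the patch.

/* Editor's illustration (not part of the patch): the two atomic idioms used in
 * the fifo.c hunks above, rebuilt with C11 atomics under stand-in names. */
#include <stdatomic.h>
#include <stdbool.h>

/* Like the kernel's atomic_inc_return(): add 1 and return the new value, so
 * "== 1" expresses the same condition the old "readers++ == 0" did. */
static int inc_return(atomic_int *v)
{
	return atomic_fetch_add(v, 1) + 1;
}

/* Like the kernel's atomic_dec_and_test(): subtract 1 and report whether the
 * counter just reached zero, matching the old "!--writers" test. */
static bool dec_and_test(atomic_int *v)
{
	return atomic_fetch_sub(v, 1) == 1;
}

In both idioms the test reads the result of the same atomic read-modify-write, so the condition cannot be separated from the modification by a concurrent update.
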
38587 diff -urNp linux-2.6.32.42/fs/file.c linux-2.6.32.42/fs/file.c
38588 --- linux-2.6.32.42/fs/file.c 2011-03-27 14:31:47.000000000 -0400
38589 +++ linux-2.6.32.42/fs/file.c 2011-04-17 15:56:46.000000000 -0400
38590 @@ -14,6 +14,7 @@
38591 #include <linux/slab.h>
38592 #include <linux/vmalloc.h>
38593 #include <linux/file.h>
38594 +#include <linux/security.h>
38595 #include <linux/fdtable.h>
38596 #include <linux/bitops.h>
38597 #include <linux/interrupt.h>
38598 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
38599 * N.B. For clone tasks sharing a files structure, this test
38600 * will limit the total number of files that can be opened.
38601 */
38602 +
38603 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
38604 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
38605 return -EMFILE;
38606
38607 diff -urNp linux-2.6.32.42/fs/filesystems.c linux-2.6.32.42/fs/filesystems.c
38608 --- linux-2.6.32.42/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
38609 +++ linux-2.6.32.42/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
38610 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
38611 int len = dot ? dot - name : strlen(name);
38612
38613 fs = __get_fs_type(name, len);
38614 +
38615 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
38616 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
38617 +#else
38618 if (!fs && (request_module("%.*s", len, name) == 0))
38619 +#endif
38620 fs = __get_fs_type(name, len);
38621
38622 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
38623 diff -urNp linux-2.6.32.42/fs/fscache/cookie.c linux-2.6.32.42/fs/fscache/cookie.c
38624 --- linux-2.6.32.42/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
38625 +++ linux-2.6.32.42/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
38626 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
38627 parent ? (char *) parent->def->name : "<no-parent>",
38628 def->name, netfs_data);
38629
38630 - fscache_stat(&fscache_n_acquires);
38631 + fscache_stat_unchecked(&fscache_n_acquires);
38632
38633 /* if there's no parent cookie, then we don't create one here either */
38634 if (!parent) {
38635 - fscache_stat(&fscache_n_acquires_null);
38636 + fscache_stat_unchecked(&fscache_n_acquires_null);
38637 _leave(" [no parent]");
38638 return NULL;
38639 }
38640 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
38641 /* allocate and initialise a cookie */
38642 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
38643 if (!cookie) {
38644 - fscache_stat(&fscache_n_acquires_oom);
38645 + fscache_stat_unchecked(&fscache_n_acquires_oom);
38646 _leave(" [ENOMEM]");
38647 return NULL;
38648 }
38649 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
38650
38651 switch (cookie->def->type) {
38652 case FSCACHE_COOKIE_TYPE_INDEX:
38653 - fscache_stat(&fscache_n_cookie_index);
38654 + fscache_stat_unchecked(&fscache_n_cookie_index);
38655 break;
38656 case FSCACHE_COOKIE_TYPE_DATAFILE:
38657 - fscache_stat(&fscache_n_cookie_data);
38658 + fscache_stat_unchecked(&fscache_n_cookie_data);
38659 break;
38660 default:
38661 - fscache_stat(&fscache_n_cookie_special);
38662 + fscache_stat_unchecked(&fscache_n_cookie_special);
38663 break;
38664 }
38665
38666 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
38667 if (fscache_acquire_non_index_cookie(cookie) < 0) {
38668 atomic_dec(&parent->n_children);
38669 __fscache_cookie_put(cookie);
38670 - fscache_stat(&fscache_n_acquires_nobufs);
38671 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
38672 _leave(" = NULL");
38673 return NULL;
38674 }
38675 }
38676
38677 - fscache_stat(&fscache_n_acquires_ok);
38678 + fscache_stat_unchecked(&fscache_n_acquires_ok);
38679 _leave(" = %p", cookie);
38680 return cookie;
38681 }
38682 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
38683 cache = fscache_select_cache_for_object(cookie->parent);
38684 if (!cache) {
38685 up_read(&fscache_addremove_sem);
38686 - fscache_stat(&fscache_n_acquires_no_cache);
38687 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
38688 _leave(" = -ENOMEDIUM [no cache]");
38689 return -ENOMEDIUM;
38690 }
38691 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
38692 object = cache->ops->alloc_object(cache, cookie);
38693 fscache_stat_d(&fscache_n_cop_alloc_object);
38694 if (IS_ERR(object)) {
38695 - fscache_stat(&fscache_n_object_no_alloc);
38696 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
38697 ret = PTR_ERR(object);
38698 goto error;
38699 }
38700
38701 - fscache_stat(&fscache_n_object_alloc);
38702 + fscache_stat_unchecked(&fscache_n_object_alloc);
38703
38704 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
38705
38706 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
38707 struct fscache_object *object;
38708 struct hlist_node *_p;
38709
38710 - fscache_stat(&fscache_n_updates);
38711 + fscache_stat_unchecked(&fscache_n_updates);
38712
38713 if (!cookie) {
38714 - fscache_stat(&fscache_n_updates_null);
38715 + fscache_stat_unchecked(&fscache_n_updates_null);
38716 _leave(" [no cookie]");
38717 return;
38718 }
38719 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
38720 struct fscache_object *object;
38721 unsigned long event;
38722
38723 - fscache_stat(&fscache_n_relinquishes);
38724 + fscache_stat_unchecked(&fscache_n_relinquishes);
38725 if (retire)
38726 - fscache_stat(&fscache_n_relinquishes_retire);
38727 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
38728
38729 if (!cookie) {
38730 - fscache_stat(&fscache_n_relinquishes_null);
38731 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
38732 _leave(" [no cookie]");
38733 return;
38734 }
38735 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
38736
38737 /* wait for the cookie to finish being instantiated (or to fail) */
38738 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
38739 - fscache_stat(&fscache_n_relinquishes_waitcrt);
38740 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
38741 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
38742 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
38743 }
38744 diff -urNp linux-2.6.32.42/fs/fscache/internal.h linux-2.6.32.42/fs/fscache/internal.h
38745 --- linux-2.6.32.42/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
38746 +++ linux-2.6.32.42/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
38747 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
38748 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
38749 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
38750
38751 -extern atomic_t fscache_n_op_pend;
38752 -extern atomic_t fscache_n_op_run;
38753 -extern atomic_t fscache_n_op_enqueue;
38754 -extern atomic_t fscache_n_op_deferred_release;
38755 -extern atomic_t fscache_n_op_release;
38756 -extern atomic_t fscache_n_op_gc;
38757 -extern atomic_t fscache_n_op_cancelled;
38758 -extern atomic_t fscache_n_op_rejected;
38759 -
38760 -extern atomic_t fscache_n_attr_changed;
38761 -extern atomic_t fscache_n_attr_changed_ok;
38762 -extern atomic_t fscache_n_attr_changed_nobufs;
38763 -extern atomic_t fscache_n_attr_changed_nomem;
38764 -extern atomic_t fscache_n_attr_changed_calls;
38765 -
38766 -extern atomic_t fscache_n_allocs;
38767 -extern atomic_t fscache_n_allocs_ok;
38768 -extern atomic_t fscache_n_allocs_wait;
38769 -extern atomic_t fscache_n_allocs_nobufs;
38770 -extern atomic_t fscache_n_allocs_intr;
38771 -extern atomic_t fscache_n_allocs_object_dead;
38772 -extern atomic_t fscache_n_alloc_ops;
38773 -extern atomic_t fscache_n_alloc_op_waits;
38774 -
38775 -extern atomic_t fscache_n_retrievals;
38776 -extern atomic_t fscache_n_retrievals_ok;
38777 -extern atomic_t fscache_n_retrievals_wait;
38778 -extern atomic_t fscache_n_retrievals_nodata;
38779 -extern atomic_t fscache_n_retrievals_nobufs;
38780 -extern atomic_t fscache_n_retrievals_intr;
38781 -extern atomic_t fscache_n_retrievals_nomem;
38782 -extern atomic_t fscache_n_retrievals_object_dead;
38783 -extern atomic_t fscache_n_retrieval_ops;
38784 -extern atomic_t fscache_n_retrieval_op_waits;
38785 -
38786 -extern atomic_t fscache_n_stores;
38787 -extern atomic_t fscache_n_stores_ok;
38788 -extern atomic_t fscache_n_stores_again;
38789 -extern atomic_t fscache_n_stores_nobufs;
38790 -extern atomic_t fscache_n_stores_oom;
38791 -extern atomic_t fscache_n_store_ops;
38792 -extern atomic_t fscache_n_store_calls;
38793 -extern atomic_t fscache_n_store_pages;
38794 -extern atomic_t fscache_n_store_radix_deletes;
38795 -extern atomic_t fscache_n_store_pages_over_limit;
38796 -
38797 -extern atomic_t fscache_n_store_vmscan_not_storing;
38798 -extern atomic_t fscache_n_store_vmscan_gone;
38799 -extern atomic_t fscache_n_store_vmscan_busy;
38800 -extern atomic_t fscache_n_store_vmscan_cancelled;
38801 -
38802 -extern atomic_t fscache_n_marks;
38803 -extern atomic_t fscache_n_uncaches;
38804 -
38805 -extern atomic_t fscache_n_acquires;
38806 -extern atomic_t fscache_n_acquires_null;
38807 -extern atomic_t fscache_n_acquires_no_cache;
38808 -extern atomic_t fscache_n_acquires_ok;
38809 -extern atomic_t fscache_n_acquires_nobufs;
38810 -extern atomic_t fscache_n_acquires_oom;
38811 -
38812 -extern atomic_t fscache_n_updates;
38813 -extern atomic_t fscache_n_updates_null;
38814 -extern atomic_t fscache_n_updates_run;
38815 -
38816 -extern atomic_t fscache_n_relinquishes;
38817 -extern atomic_t fscache_n_relinquishes_null;
38818 -extern atomic_t fscache_n_relinquishes_waitcrt;
38819 -extern atomic_t fscache_n_relinquishes_retire;
38820 -
38821 -extern atomic_t fscache_n_cookie_index;
38822 -extern atomic_t fscache_n_cookie_data;
38823 -extern atomic_t fscache_n_cookie_special;
38824 -
38825 -extern atomic_t fscache_n_object_alloc;
38826 -extern atomic_t fscache_n_object_no_alloc;
38827 -extern atomic_t fscache_n_object_lookups;
38828 -extern atomic_t fscache_n_object_lookups_negative;
38829 -extern atomic_t fscache_n_object_lookups_positive;
38830 -extern atomic_t fscache_n_object_lookups_timed_out;
38831 -extern atomic_t fscache_n_object_created;
38832 -extern atomic_t fscache_n_object_avail;
38833 -extern atomic_t fscache_n_object_dead;
38834 -
38835 -extern atomic_t fscache_n_checkaux_none;
38836 -extern atomic_t fscache_n_checkaux_okay;
38837 -extern atomic_t fscache_n_checkaux_update;
38838 -extern atomic_t fscache_n_checkaux_obsolete;
38839 +extern atomic_unchecked_t fscache_n_op_pend;
38840 +extern atomic_unchecked_t fscache_n_op_run;
38841 +extern atomic_unchecked_t fscache_n_op_enqueue;
38842 +extern atomic_unchecked_t fscache_n_op_deferred_release;
38843 +extern atomic_unchecked_t fscache_n_op_release;
38844 +extern atomic_unchecked_t fscache_n_op_gc;
38845 +extern atomic_unchecked_t fscache_n_op_cancelled;
38846 +extern atomic_unchecked_t fscache_n_op_rejected;
38847 +
38848 +extern atomic_unchecked_t fscache_n_attr_changed;
38849 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
38850 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
38851 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
38852 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
38853 +
38854 +extern atomic_unchecked_t fscache_n_allocs;
38855 +extern atomic_unchecked_t fscache_n_allocs_ok;
38856 +extern atomic_unchecked_t fscache_n_allocs_wait;
38857 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
38858 +extern atomic_unchecked_t fscache_n_allocs_intr;
38859 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
38860 +extern atomic_unchecked_t fscache_n_alloc_ops;
38861 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
38862 +
38863 +extern atomic_unchecked_t fscache_n_retrievals;
38864 +extern atomic_unchecked_t fscache_n_retrievals_ok;
38865 +extern atomic_unchecked_t fscache_n_retrievals_wait;
38866 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
38867 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
38868 +extern atomic_unchecked_t fscache_n_retrievals_intr;
38869 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
38870 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
38871 +extern atomic_unchecked_t fscache_n_retrieval_ops;
38872 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
38873 +
38874 +extern atomic_unchecked_t fscache_n_stores;
38875 +extern atomic_unchecked_t fscache_n_stores_ok;
38876 +extern atomic_unchecked_t fscache_n_stores_again;
38877 +extern atomic_unchecked_t fscache_n_stores_nobufs;
38878 +extern atomic_unchecked_t fscache_n_stores_oom;
38879 +extern atomic_unchecked_t fscache_n_store_ops;
38880 +extern atomic_unchecked_t fscache_n_store_calls;
38881 +extern atomic_unchecked_t fscache_n_store_pages;
38882 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
38883 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
38884 +
38885 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
38886 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
38887 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
38888 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
38889 +
38890 +extern atomic_unchecked_t fscache_n_marks;
38891 +extern atomic_unchecked_t fscache_n_uncaches;
38892 +
38893 +extern atomic_unchecked_t fscache_n_acquires;
38894 +extern atomic_unchecked_t fscache_n_acquires_null;
38895 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
38896 +extern atomic_unchecked_t fscache_n_acquires_ok;
38897 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
38898 +extern atomic_unchecked_t fscache_n_acquires_oom;
38899 +
38900 +extern atomic_unchecked_t fscache_n_updates;
38901 +extern atomic_unchecked_t fscache_n_updates_null;
38902 +extern atomic_unchecked_t fscache_n_updates_run;
38903 +
38904 +extern atomic_unchecked_t fscache_n_relinquishes;
38905 +extern atomic_unchecked_t fscache_n_relinquishes_null;
38906 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
38907 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
38908 +
38909 +extern atomic_unchecked_t fscache_n_cookie_index;
38910 +extern atomic_unchecked_t fscache_n_cookie_data;
38911 +extern atomic_unchecked_t fscache_n_cookie_special;
38912 +
38913 +extern atomic_unchecked_t fscache_n_object_alloc;
38914 +extern atomic_unchecked_t fscache_n_object_no_alloc;
38915 +extern atomic_unchecked_t fscache_n_object_lookups;
38916 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
38917 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
38918 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
38919 +extern atomic_unchecked_t fscache_n_object_created;
38920 +extern atomic_unchecked_t fscache_n_object_avail;
38921 +extern atomic_unchecked_t fscache_n_object_dead;
38922 +
38923 +extern atomic_unchecked_t fscache_n_checkaux_none;
38924 +extern atomic_unchecked_t fscache_n_checkaux_okay;
38925 +extern atomic_unchecked_t fscache_n_checkaux_update;
38926 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
38927
38928 extern atomic_t fscache_n_cop_alloc_object;
38929 extern atomic_t fscache_n_cop_lookup_object;
38930 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
38931 atomic_inc(stat);
38932 }
38933
38934 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
38935 +{
38936 + atomic_inc_unchecked(stat);
38937 +}
38938 +
38939 static inline void fscache_stat_d(atomic_t *stat)
38940 {
38941 atomic_dec(stat);
38942 @@ -259,6 +264,7 @@ extern const struct file_operations fsca
38943
38944 #define __fscache_stat(stat) (NULL)
38945 #define fscache_stat(stat) do {} while (0)
38946 +#define fscache_stat_unchecked(stat) do {} while (0)
38947 #define fscache_stat_d(stat) do {} while (0)
38948 #endif
38949
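
The internal.h hunk above pairs the atomic_unchecked_t statistics declarations with a fscache_stat_unchecked() inline and, for builds without CONFIG_FSCACHE_STATS, a do-nothing macro so that call sites compile away. The snippet below is an editor's stand-alone sketch of that compile-time switch under stand-in names, not part of the patch.

/* Editor's illustration (not part of the patch): the compile-time statistics
 * switch mirrored by the internal.h hunk above, under stand-in names. */
#include <stdatomic.h>

#ifdef MY_STATS
/* Counted build: each call becomes one atomic increment. */
static inline void stat_inc(atomic_long *stat)
{
	atomic_fetch_add(stat, 1);
}
#else
/* Uncounted build: the do/while(0) form keeps "stat_inc(x);" a valid
 * statement while generating no code at all. */
#define stat_inc(stat) do { } while (0)
#endif
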
38950 diff -urNp linux-2.6.32.42/fs/fscache/object.c linux-2.6.32.42/fs/fscache/object.c
38951 --- linux-2.6.32.42/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
38952 +++ linux-2.6.32.42/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
38953 @@ -144,7 +144,7 @@ static void fscache_object_state_machine
38954 /* update the object metadata on disk */
38955 case FSCACHE_OBJECT_UPDATING:
38956 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
38957 - fscache_stat(&fscache_n_updates_run);
38958 + fscache_stat_unchecked(&fscache_n_updates_run);
38959 fscache_stat(&fscache_n_cop_update_object);
38960 object->cache->ops->update_object(object);
38961 fscache_stat_d(&fscache_n_cop_update_object);
38962 @@ -233,7 +233,7 @@ static void fscache_object_state_machine
38963 spin_lock(&object->lock);
38964 object->state = FSCACHE_OBJECT_DEAD;
38965 spin_unlock(&object->lock);
38966 - fscache_stat(&fscache_n_object_dead);
38967 + fscache_stat_unchecked(&fscache_n_object_dead);
38968 goto terminal_transit;
38969
38970 /* handle the parent cache of this object being withdrawn from
38971 @@ -248,7 +248,7 @@ static void fscache_object_state_machine
38972 spin_lock(&object->lock);
38973 object->state = FSCACHE_OBJECT_DEAD;
38974 spin_unlock(&object->lock);
38975 - fscache_stat(&fscache_n_object_dead);
38976 + fscache_stat_unchecked(&fscache_n_object_dead);
38977 goto terminal_transit;
38978
38979 /* complain about the object being woken up once it is
38980 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
38981 parent->cookie->def->name, cookie->def->name,
38982 object->cache->tag->name);
38983
38984 - fscache_stat(&fscache_n_object_lookups);
38985 + fscache_stat_unchecked(&fscache_n_object_lookups);
38986 fscache_stat(&fscache_n_cop_lookup_object);
38987 ret = object->cache->ops->lookup_object(object);
38988 fscache_stat_d(&fscache_n_cop_lookup_object);
38989 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
38990 if (ret == -ETIMEDOUT) {
38991 /* probably stuck behind another object, so move this one to
38992 * the back of the queue */
38993 - fscache_stat(&fscache_n_object_lookups_timed_out);
38994 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
38995 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
38996 }
38997
38998 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
38999
39000 spin_lock(&object->lock);
39001 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39002 - fscache_stat(&fscache_n_object_lookups_negative);
39003 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
39004
39005 /* transit here to allow write requests to begin stacking up
39006 * and read requests to begin returning ENODATA */
39007 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
39008 * result, in which case there may be data available */
39009 spin_lock(&object->lock);
39010 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39011 - fscache_stat(&fscache_n_object_lookups_positive);
39012 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
39013
39014 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
39015
39016 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
39017 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39018 } else {
39019 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
39020 - fscache_stat(&fscache_n_object_created);
39021 + fscache_stat_unchecked(&fscache_n_object_created);
39022
39023 object->state = FSCACHE_OBJECT_AVAILABLE;
39024 spin_unlock(&object->lock);
39025 @@ -633,7 +633,7 @@ static void fscache_object_available(str
39026 fscache_enqueue_dependents(object);
39027
39028 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
39029 - fscache_stat(&fscache_n_object_avail);
39030 + fscache_stat_unchecked(&fscache_n_object_avail);
39031
39032 _leave("");
39033 }
39034 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
39035 enum fscache_checkaux result;
39036
39037 if (!object->cookie->def->check_aux) {
39038 - fscache_stat(&fscache_n_checkaux_none);
39039 + fscache_stat_unchecked(&fscache_n_checkaux_none);
39040 return FSCACHE_CHECKAUX_OKAY;
39041 }
39042
39043 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
39044 switch (result) {
39045 /* entry okay as is */
39046 case FSCACHE_CHECKAUX_OKAY:
39047 - fscache_stat(&fscache_n_checkaux_okay);
39048 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
39049 break;
39050
39051 /* entry requires update */
39052 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
39053 - fscache_stat(&fscache_n_checkaux_update);
39054 + fscache_stat_unchecked(&fscache_n_checkaux_update);
39055 break;
39056
39057 /* entry requires deletion */
39058 case FSCACHE_CHECKAUX_OBSOLETE:
39059 - fscache_stat(&fscache_n_checkaux_obsolete);
39060 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
39061 break;
39062
39063 default:
39064 diff -urNp linux-2.6.32.42/fs/fscache/operation.c linux-2.6.32.42/fs/fscache/operation.c
39065 --- linux-2.6.32.42/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
39066 +++ linux-2.6.32.42/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
39067 @@ -16,7 +16,7 @@
39068 #include <linux/seq_file.h>
39069 #include "internal.h"
39070
39071 -atomic_t fscache_op_debug_id;
39072 +atomic_unchecked_t fscache_op_debug_id;
39073 EXPORT_SYMBOL(fscache_op_debug_id);
39074
39075 /**
39076 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
39077 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
39078 ASSERTCMP(atomic_read(&op->usage), >, 0);
39079
39080 - fscache_stat(&fscache_n_op_enqueue);
39081 + fscache_stat_unchecked(&fscache_n_op_enqueue);
39082 switch (op->flags & FSCACHE_OP_TYPE) {
39083 case FSCACHE_OP_FAST:
39084 _debug("queue fast");
39085 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
39086 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
39087 if (op->processor)
39088 fscache_enqueue_operation(op);
39089 - fscache_stat(&fscache_n_op_run);
39090 + fscache_stat_unchecked(&fscache_n_op_run);
39091 }
39092
39093 /*
39094 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
39095 if (object->n_ops > 0) {
39096 atomic_inc(&op->usage);
39097 list_add_tail(&op->pend_link, &object->pending_ops);
39098 - fscache_stat(&fscache_n_op_pend);
39099 + fscache_stat_unchecked(&fscache_n_op_pend);
39100 } else if (!list_empty(&object->pending_ops)) {
39101 atomic_inc(&op->usage);
39102 list_add_tail(&op->pend_link, &object->pending_ops);
39103 - fscache_stat(&fscache_n_op_pend);
39104 + fscache_stat_unchecked(&fscache_n_op_pend);
39105 fscache_start_operations(object);
39106 } else {
39107 ASSERTCMP(object->n_in_progress, ==, 0);
39108 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
39109 object->n_exclusive++; /* reads and writes must wait */
39110 atomic_inc(&op->usage);
39111 list_add_tail(&op->pend_link, &object->pending_ops);
39112 - fscache_stat(&fscache_n_op_pend);
39113 + fscache_stat_unchecked(&fscache_n_op_pend);
39114 ret = 0;
39115 } else {
39116 /* not allowed to submit ops in any other state */
39117 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
39118 if (object->n_exclusive > 0) {
39119 atomic_inc(&op->usage);
39120 list_add_tail(&op->pend_link, &object->pending_ops);
39121 - fscache_stat(&fscache_n_op_pend);
39122 + fscache_stat_unchecked(&fscache_n_op_pend);
39123 } else if (!list_empty(&object->pending_ops)) {
39124 atomic_inc(&op->usage);
39125 list_add_tail(&op->pend_link, &object->pending_ops);
39126 - fscache_stat(&fscache_n_op_pend);
39127 + fscache_stat_unchecked(&fscache_n_op_pend);
39128 fscache_start_operations(object);
39129 } else {
39130 ASSERTCMP(object->n_exclusive, ==, 0);
39131 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
39132 object->n_ops++;
39133 atomic_inc(&op->usage);
39134 list_add_tail(&op->pend_link, &object->pending_ops);
39135 - fscache_stat(&fscache_n_op_pend);
39136 + fscache_stat_unchecked(&fscache_n_op_pend);
39137 ret = 0;
39138 } else if (object->state == FSCACHE_OBJECT_DYING ||
39139 object->state == FSCACHE_OBJECT_LC_DYING ||
39140 object->state == FSCACHE_OBJECT_WITHDRAWING) {
39141 - fscache_stat(&fscache_n_op_rejected);
39142 + fscache_stat_unchecked(&fscache_n_op_rejected);
39143 ret = -ENOBUFS;
39144 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
39145 fscache_report_unexpected_submission(object, op, ostate);
39146 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
39147
39148 ret = -EBUSY;
39149 if (!list_empty(&op->pend_link)) {
39150 - fscache_stat(&fscache_n_op_cancelled);
39151 + fscache_stat_unchecked(&fscache_n_op_cancelled);
39152 list_del_init(&op->pend_link);
39153 object->n_ops--;
39154 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
39155 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
39156 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
39157 BUG();
39158
39159 - fscache_stat(&fscache_n_op_release);
39160 + fscache_stat_unchecked(&fscache_n_op_release);
39161
39162 if (op->release) {
39163 op->release(op);
39164 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
39165 * lock, and defer it otherwise */
39166 if (!spin_trylock(&object->lock)) {
39167 _debug("defer put");
39168 - fscache_stat(&fscache_n_op_deferred_release);
39169 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
39170
39171 cache = object->cache;
39172 spin_lock(&cache->op_gc_list_lock);
39173 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
39174
39175 _debug("GC DEFERRED REL OBJ%x OP%x",
39176 object->debug_id, op->debug_id);
39177 - fscache_stat(&fscache_n_op_gc);
39178 + fscache_stat_unchecked(&fscache_n_op_gc);
39179
39180 ASSERTCMP(atomic_read(&op->usage), ==, 0);
39181
39182 diff -urNp linux-2.6.32.42/fs/fscache/page.c linux-2.6.32.42/fs/fscache/page.c
39183 --- linux-2.6.32.42/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
39184 +++ linux-2.6.32.42/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
39185 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
39186 val = radix_tree_lookup(&cookie->stores, page->index);
39187 if (!val) {
39188 rcu_read_unlock();
39189 - fscache_stat(&fscache_n_store_vmscan_not_storing);
39190 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
39191 __fscache_uncache_page(cookie, page);
39192 return true;
39193 }
39194 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
39195 spin_unlock(&cookie->stores_lock);
39196
39197 if (xpage) {
39198 - fscache_stat(&fscache_n_store_vmscan_cancelled);
39199 - fscache_stat(&fscache_n_store_radix_deletes);
39200 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
39201 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39202 ASSERTCMP(xpage, ==, page);
39203 } else {
39204 - fscache_stat(&fscache_n_store_vmscan_gone);
39205 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
39206 }
39207
39208 wake_up_bit(&cookie->flags, 0);
39209 @@ -106,7 +106,7 @@ page_busy:
39210 /* we might want to wait here, but that could deadlock the allocator as
39211 * the slow-work threads writing to the cache may all end up sleeping
39212 * on memory allocation */
39213 - fscache_stat(&fscache_n_store_vmscan_busy);
39214 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
39215 return false;
39216 }
39217 EXPORT_SYMBOL(__fscache_maybe_release_page);
39218 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
39219 FSCACHE_COOKIE_STORING_TAG);
39220 if (!radix_tree_tag_get(&cookie->stores, page->index,
39221 FSCACHE_COOKIE_PENDING_TAG)) {
39222 - fscache_stat(&fscache_n_store_radix_deletes);
39223 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39224 xpage = radix_tree_delete(&cookie->stores, page->index);
39225 }
39226 spin_unlock(&cookie->stores_lock);
39227 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
39228
39229 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
39230
39231 - fscache_stat(&fscache_n_attr_changed_calls);
39232 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
39233
39234 if (fscache_object_is_active(object)) {
39235 fscache_set_op_state(op, "CallFS");
39236 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
39237
39238 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39239
39240 - fscache_stat(&fscache_n_attr_changed);
39241 + fscache_stat_unchecked(&fscache_n_attr_changed);
39242
39243 op = kzalloc(sizeof(*op), GFP_KERNEL);
39244 if (!op) {
39245 - fscache_stat(&fscache_n_attr_changed_nomem);
39246 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
39247 _leave(" = -ENOMEM");
39248 return -ENOMEM;
39249 }
39250 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
39251 if (fscache_submit_exclusive_op(object, op) < 0)
39252 goto nobufs;
39253 spin_unlock(&cookie->lock);
39254 - fscache_stat(&fscache_n_attr_changed_ok);
39255 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
39256 fscache_put_operation(op);
39257 _leave(" = 0");
39258 return 0;
39259 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
39260 nobufs:
39261 spin_unlock(&cookie->lock);
39262 kfree(op);
39263 - fscache_stat(&fscache_n_attr_changed_nobufs);
39264 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
39265 _leave(" = %d", -ENOBUFS);
39266 return -ENOBUFS;
39267 }
39268 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
39269 /* allocate a retrieval operation and attempt to submit it */
39270 op = kzalloc(sizeof(*op), GFP_NOIO);
39271 if (!op) {
39272 - fscache_stat(&fscache_n_retrievals_nomem);
39273 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39274 return NULL;
39275 }
39276
39277 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
39278 return 0;
39279 }
39280
39281 - fscache_stat(&fscache_n_retrievals_wait);
39282 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
39283
39284 jif = jiffies;
39285 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
39286 fscache_wait_bit_interruptible,
39287 TASK_INTERRUPTIBLE) != 0) {
39288 - fscache_stat(&fscache_n_retrievals_intr);
39289 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39290 _leave(" = -ERESTARTSYS");
39291 return -ERESTARTSYS;
39292 }
39293 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
39294 */
39295 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
39296 struct fscache_retrieval *op,
39297 - atomic_t *stat_op_waits,
39298 - atomic_t *stat_object_dead)
39299 + atomic_unchecked_t *stat_op_waits,
39300 + atomic_unchecked_t *stat_object_dead)
39301 {
39302 int ret;
39303
39304 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
39305 goto check_if_dead;
39306
39307 _debug(">>> WT");
39308 - fscache_stat(stat_op_waits);
39309 + fscache_stat_unchecked(stat_op_waits);
39310 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
39311 fscache_wait_bit_interruptible,
39312 TASK_INTERRUPTIBLE) < 0) {
39313 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
39314
39315 check_if_dead:
39316 if (unlikely(fscache_object_is_dead(object))) {
39317 - fscache_stat(stat_object_dead);
39318 + fscache_stat_unchecked(stat_object_dead);
39319 return -ENOBUFS;
39320 }
39321 return 0;
39322 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
39323
39324 _enter("%p,%p,,,", cookie, page);
39325
39326 - fscache_stat(&fscache_n_retrievals);
39327 + fscache_stat_unchecked(&fscache_n_retrievals);
39328
39329 if (hlist_empty(&cookie->backing_objects))
39330 goto nobufs;
39331 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
39332 goto nobufs_unlock;
39333 spin_unlock(&cookie->lock);
39334
39335 - fscache_stat(&fscache_n_retrieval_ops);
39336 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
39337
39338 /* pin the netfs read context in case we need to do the actual netfs
39339 * read because we've encountered a cache read failure */
39340 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
39341
39342 error:
39343 if (ret == -ENOMEM)
39344 - fscache_stat(&fscache_n_retrievals_nomem);
39345 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39346 else if (ret == -ERESTARTSYS)
39347 - fscache_stat(&fscache_n_retrievals_intr);
39348 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39349 else if (ret == -ENODATA)
39350 - fscache_stat(&fscache_n_retrievals_nodata);
39351 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39352 else if (ret < 0)
39353 - fscache_stat(&fscache_n_retrievals_nobufs);
39354 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39355 else
39356 - fscache_stat(&fscache_n_retrievals_ok);
39357 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
39358
39359 fscache_put_retrieval(op);
39360 _leave(" = %d", ret);
39361 @@ -453,7 +453,7 @@ nobufs_unlock:
39362 spin_unlock(&cookie->lock);
39363 kfree(op);
39364 nobufs:
39365 - fscache_stat(&fscache_n_retrievals_nobufs);
39366 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39367 _leave(" = -ENOBUFS");
39368 return -ENOBUFS;
39369 }
39370 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
39371
39372 _enter("%p,,%d,,,", cookie, *nr_pages);
39373
39374 - fscache_stat(&fscache_n_retrievals);
39375 + fscache_stat_unchecked(&fscache_n_retrievals);
39376
39377 if (hlist_empty(&cookie->backing_objects))
39378 goto nobufs;
39379 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
39380 goto nobufs_unlock;
39381 spin_unlock(&cookie->lock);
39382
39383 - fscache_stat(&fscache_n_retrieval_ops);
39384 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
39385
39386 /* pin the netfs read context in case we need to do the actual netfs
39387 * read because we've encountered a cache read failure */
39388 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
39389
39390 error:
39391 if (ret == -ENOMEM)
39392 - fscache_stat(&fscache_n_retrievals_nomem);
39393 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39394 else if (ret == -ERESTARTSYS)
39395 - fscache_stat(&fscache_n_retrievals_intr);
39396 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39397 else if (ret == -ENODATA)
39398 - fscache_stat(&fscache_n_retrievals_nodata);
39399 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39400 else if (ret < 0)
39401 - fscache_stat(&fscache_n_retrievals_nobufs);
39402 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39403 else
39404 - fscache_stat(&fscache_n_retrievals_ok);
39405 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
39406
39407 fscache_put_retrieval(op);
39408 _leave(" = %d", ret);
39409 @@ -570,7 +570,7 @@ nobufs_unlock:
39410 spin_unlock(&cookie->lock);
39411 kfree(op);
39412 nobufs:
39413 - fscache_stat(&fscache_n_retrievals_nobufs);
39414 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39415 _leave(" = -ENOBUFS");
39416 return -ENOBUFS;
39417 }
39418 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
39419
39420 _enter("%p,%p,,,", cookie, page);
39421
39422 - fscache_stat(&fscache_n_allocs);
39423 + fscache_stat_unchecked(&fscache_n_allocs);
39424
39425 if (hlist_empty(&cookie->backing_objects))
39426 goto nobufs;
39427 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
39428 goto nobufs_unlock;
39429 spin_unlock(&cookie->lock);
39430
39431 - fscache_stat(&fscache_n_alloc_ops);
39432 + fscache_stat_unchecked(&fscache_n_alloc_ops);
39433
39434 ret = fscache_wait_for_retrieval_activation(
39435 object, op,
39436 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
39437
39438 error:
39439 if (ret == -ERESTARTSYS)
39440 - fscache_stat(&fscache_n_allocs_intr);
39441 + fscache_stat_unchecked(&fscache_n_allocs_intr);
39442 else if (ret < 0)
39443 - fscache_stat(&fscache_n_allocs_nobufs);
39444 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
39445 else
39446 - fscache_stat(&fscache_n_allocs_ok);
39447 + fscache_stat_unchecked(&fscache_n_allocs_ok);
39448
39449 fscache_put_retrieval(op);
39450 _leave(" = %d", ret);
39451 @@ -651,7 +651,7 @@ nobufs_unlock:
39452 spin_unlock(&cookie->lock);
39453 kfree(op);
39454 nobufs:
39455 - fscache_stat(&fscache_n_allocs_nobufs);
39456 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
39457 _leave(" = -ENOBUFS");
39458 return -ENOBUFS;
39459 }
39460 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
39461
39462 spin_lock(&cookie->stores_lock);
39463
39464 - fscache_stat(&fscache_n_store_calls);
39465 + fscache_stat_unchecked(&fscache_n_store_calls);
39466
39467 /* find a page to store */
39468 page = NULL;
39469 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
39470 page = results[0];
39471 _debug("gang %d [%lx]", n, page->index);
39472 if (page->index > op->store_limit) {
39473 - fscache_stat(&fscache_n_store_pages_over_limit);
39474 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
39475 goto superseded;
39476 }
39477
39478 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
39479
39480 if (page) {
39481 fscache_set_op_state(&op->op, "Store");
39482 - fscache_stat(&fscache_n_store_pages);
39483 + fscache_stat_unchecked(&fscache_n_store_pages);
39484 fscache_stat(&fscache_n_cop_write_page);
39485 ret = object->cache->ops->write_page(op, page);
39486 fscache_stat_d(&fscache_n_cop_write_page);
39487 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
39488 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39489 ASSERT(PageFsCache(page));
39490
39491 - fscache_stat(&fscache_n_stores);
39492 + fscache_stat_unchecked(&fscache_n_stores);
39493
39494 op = kzalloc(sizeof(*op), GFP_NOIO);
39495 if (!op)
39496 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
39497 spin_unlock(&cookie->stores_lock);
39498 spin_unlock(&object->lock);
39499
39500 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
39501 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
39502 op->store_limit = object->store_limit;
39503
39504 if (fscache_submit_op(object, &op->op) < 0)
39505 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
39506
39507 spin_unlock(&cookie->lock);
39508 radix_tree_preload_end();
39509 - fscache_stat(&fscache_n_store_ops);
39510 - fscache_stat(&fscache_n_stores_ok);
39511 + fscache_stat_unchecked(&fscache_n_store_ops);
39512 + fscache_stat_unchecked(&fscache_n_stores_ok);
39513
39514 /* the slow work queue now carries its own ref on the object */
39515 fscache_put_operation(&op->op);
39516 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
39517 return 0;
39518
39519 already_queued:
39520 - fscache_stat(&fscache_n_stores_again);
39521 + fscache_stat_unchecked(&fscache_n_stores_again);
39522 already_pending:
39523 spin_unlock(&cookie->stores_lock);
39524 spin_unlock(&object->lock);
39525 spin_unlock(&cookie->lock);
39526 radix_tree_preload_end();
39527 kfree(op);
39528 - fscache_stat(&fscache_n_stores_ok);
39529 + fscache_stat_unchecked(&fscache_n_stores_ok);
39530 _leave(" = 0");
39531 return 0;
39532
39533 @@ -886,14 +886,14 @@ nobufs:
39534 spin_unlock(&cookie->lock);
39535 radix_tree_preload_end();
39536 kfree(op);
39537 - fscache_stat(&fscache_n_stores_nobufs);
39538 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
39539 _leave(" = -ENOBUFS");
39540 return -ENOBUFS;
39541
39542 nomem_free:
39543 kfree(op);
39544 nomem:
39545 - fscache_stat(&fscache_n_stores_oom);
39546 + fscache_stat_unchecked(&fscache_n_stores_oom);
39547 _leave(" = -ENOMEM");
39548 return -ENOMEM;
39549 }
39550 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
39551 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39552 ASSERTCMP(page, !=, NULL);
39553
39554 - fscache_stat(&fscache_n_uncaches);
39555 + fscache_stat_unchecked(&fscache_n_uncaches);
39556
39557 /* cache withdrawal may beat us to it */
39558 if (!PageFsCache(page))
39559 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
39560 unsigned long loop;
39561
39562 #ifdef CONFIG_FSCACHE_STATS
39563 - atomic_add(pagevec->nr, &fscache_n_marks);
39564 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
39565 #endif
39566
39567 for (loop = 0; loop < pagevec->nr; loop++) {
39568 diff -urNp linux-2.6.32.42/fs/fscache/stats.c linux-2.6.32.42/fs/fscache/stats.c
39569 --- linux-2.6.32.42/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
39570 +++ linux-2.6.32.42/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
39571 @@ -18,95 +18,95 @@
39572 /*
39573 * operation counters
39574 */
39575 -atomic_t fscache_n_op_pend;
39576 -atomic_t fscache_n_op_run;
39577 -atomic_t fscache_n_op_enqueue;
39578 -atomic_t fscache_n_op_requeue;
39579 -atomic_t fscache_n_op_deferred_release;
39580 -atomic_t fscache_n_op_release;
39581 -atomic_t fscache_n_op_gc;
39582 -atomic_t fscache_n_op_cancelled;
39583 -atomic_t fscache_n_op_rejected;
39584 -
39585 -atomic_t fscache_n_attr_changed;
39586 -atomic_t fscache_n_attr_changed_ok;
39587 -atomic_t fscache_n_attr_changed_nobufs;
39588 -atomic_t fscache_n_attr_changed_nomem;
39589 -atomic_t fscache_n_attr_changed_calls;
39590 -
39591 -atomic_t fscache_n_allocs;
39592 -atomic_t fscache_n_allocs_ok;
39593 -atomic_t fscache_n_allocs_wait;
39594 -atomic_t fscache_n_allocs_nobufs;
39595 -atomic_t fscache_n_allocs_intr;
39596 -atomic_t fscache_n_allocs_object_dead;
39597 -atomic_t fscache_n_alloc_ops;
39598 -atomic_t fscache_n_alloc_op_waits;
39599 -
39600 -atomic_t fscache_n_retrievals;
39601 -atomic_t fscache_n_retrievals_ok;
39602 -atomic_t fscache_n_retrievals_wait;
39603 -atomic_t fscache_n_retrievals_nodata;
39604 -atomic_t fscache_n_retrievals_nobufs;
39605 -atomic_t fscache_n_retrievals_intr;
39606 -atomic_t fscache_n_retrievals_nomem;
39607 -atomic_t fscache_n_retrievals_object_dead;
39608 -atomic_t fscache_n_retrieval_ops;
39609 -atomic_t fscache_n_retrieval_op_waits;
39610 -
39611 -atomic_t fscache_n_stores;
39612 -atomic_t fscache_n_stores_ok;
39613 -atomic_t fscache_n_stores_again;
39614 -atomic_t fscache_n_stores_nobufs;
39615 -atomic_t fscache_n_stores_oom;
39616 -atomic_t fscache_n_store_ops;
39617 -atomic_t fscache_n_store_calls;
39618 -atomic_t fscache_n_store_pages;
39619 -atomic_t fscache_n_store_radix_deletes;
39620 -atomic_t fscache_n_store_pages_over_limit;
39621 -
39622 -atomic_t fscache_n_store_vmscan_not_storing;
39623 -atomic_t fscache_n_store_vmscan_gone;
39624 -atomic_t fscache_n_store_vmscan_busy;
39625 -atomic_t fscache_n_store_vmscan_cancelled;
39626 -
39627 -atomic_t fscache_n_marks;
39628 -atomic_t fscache_n_uncaches;
39629 -
39630 -atomic_t fscache_n_acquires;
39631 -atomic_t fscache_n_acquires_null;
39632 -atomic_t fscache_n_acquires_no_cache;
39633 -atomic_t fscache_n_acquires_ok;
39634 -atomic_t fscache_n_acquires_nobufs;
39635 -atomic_t fscache_n_acquires_oom;
39636 -
39637 -atomic_t fscache_n_updates;
39638 -atomic_t fscache_n_updates_null;
39639 -atomic_t fscache_n_updates_run;
39640 -
39641 -atomic_t fscache_n_relinquishes;
39642 -atomic_t fscache_n_relinquishes_null;
39643 -atomic_t fscache_n_relinquishes_waitcrt;
39644 -atomic_t fscache_n_relinquishes_retire;
39645 -
39646 -atomic_t fscache_n_cookie_index;
39647 -atomic_t fscache_n_cookie_data;
39648 -atomic_t fscache_n_cookie_special;
39649 -
39650 -atomic_t fscache_n_object_alloc;
39651 -atomic_t fscache_n_object_no_alloc;
39652 -atomic_t fscache_n_object_lookups;
39653 -atomic_t fscache_n_object_lookups_negative;
39654 -atomic_t fscache_n_object_lookups_positive;
39655 -atomic_t fscache_n_object_lookups_timed_out;
39656 -atomic_t fscache_n_object_created;
39657 -atomic_t fscache_n_object_avail;
39658 -atomic_t fscache_n_object_dead;
39659 -
39660 -atomic_t fscache_n_checkaux_none;
39661 -atomic_t fscache_n_checkaux_okay;
39662 -atomic_t fscache_n_checkaux_update;
39663 -atomic_t fscache_n_checkaux_obsolete;
39664 +atomic_unchecked_t fscache_n_op_pend;
39665 +atomic_unchecked_t fscache_n_op_run;
39666 +atomic_unchecked_t fscache_n_op_enqueue;
39667 +atomic_unchecked_t fscache_n_op_requeue;
39668 +atomic_unchecked_t fscache_n_op_deferred_release;
39669 +atomic_unchecked_t fscache_n_op_release;
39670 +atomic_unchecked_t fscache_n_op_gc;
39671 +atomic_unchecked_t fscache_n_op_cancelled;
39672 +atomic_unchecked_t fscache_n_op_rejected;
39673 +
39674 +atomic_unchecked_t fscache_n_attr_changed;
39675 +atomic_unchecked_t fscache_n_attr_changed_ok;
39676 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
39677 +atomic_unchecked_t fscache_n_attr_changed_nomem;
39678 +atomic_unchecked_t fscache_n_attr_changed_calls;
39679 +
39680 +atomic_unchecked_t fscache_n_allocs;
39681 +atomic_unchecked_t fscache_n_allocs_ok;
39682 +atomic_unchecked_t fscache_n_allocs_wait;
39683 +atomic_unchecked_t fscache_n_allocs_nobufs;
39684 +atomic_unchecked_t fscache_n_allocs_intr;
39685 +atomic_unchecked_t fscache_n_allocs_object_dead;
39686 +atomic_unchecked_t fscache_n_alloc_ops;
39687 +atomic_unchecked_t fscache_n_alloc_op_waits;
39688 +
39689 +atomic_unchecked_t fscache_n_retrievals;
39690 +atomic_unchecked_t fscache_n_retrievals_ok;
39691 +atomic_unchecked_t fscache_n_retrievals_wait;
39692 +atomic_unchecked_t fscache_n_retrievals_nodata;
39693 +atomic_unchecked_t fscache_n_retrievals_nobufs;
39694 +atomic_unchecked_t fscache_n_retrievals_intr;
39695 +atomic_unchecked_t fscache_n_retrievals_nomem;
39696 +atomic_unchecked_t fscache_n_retrievals_object_dead;
39697 +atomic_unchecked_t fscache_n_retrieval_ops;
39698 +atomic_unchecked_t fscache_n_retrieval_op_waits;
39699 +
39700 +atomic_unchecked_t fscache_n_stores;
39701 +atomic_unchecked_t fscache_n_stores_ok;
39702 +atomic_unchecked_t fscache_n_stores_again;
39703 +atomic_unchecked_t fscache_n_stores_nobufs;
39704 +atomic_unchecked_t fscache_n_stores_oom;
39705 +atomic_unchecked_t fscache_n_store_ops;
39706 +atomic_unchecked_t fscache_n_store_calls;
39707 +atomic_unchecked_t fscache_n_store_pages;
39708 +atomic_unchecked_t fscache_n_store_radix_deletes;
39709 +atomic_unchecked_t fscache_n_store_pages_over_limit;
39710 +
39711 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
39712 +atomic_unchecked_t fscache_n_store_vmscan_gone;
39713 +atomic_unchecked_t fscache_n_store_vmscan_busy;
39714 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
39715 +
39716 +atomic_unchecked_t fscache_n_marks;
39717 +atomic_unchecked_t fscache_n_uncaches;
39718 +
39719 +atomic_unchecked_t fscache_n_acquires;
39720 +atomic_unchecked_t fscache_n_acquires_null;
39721 +atomic_unchecked_t fscache_n_acquires_no_cache;
39722 +atomic_unchecked_t fscache_n_acquires_ok;
39723 +atomic_unchecked_t fscache_n_acquires_nobufs;
39724 +atomic_unchecked_t fscache_n_acquires_oom;
39725 +
39726 +atomic_unchecked_t fscache_n_updates;
39727 +atomic_unchecked_t fscache_n_updates_null;
39728 +atomic_unchecked_t fscache_n_updates_run;
39729 +
39730 +atomic_unchecked_t fscache_n_relinquishes;
39731 +atomic_unchecked_t fscache_n_relinquishes_null;
39732 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
39733 +atomic_unchecked_t fscache_n_relinquishes_retire;
39734 +
39735 +atomic_unchecked_t fscache_n_cookie_index;
39736 +atomic_unchecked_t fscache_n_cookie_data;
39737 +atomic_unchecked_t fscache_n_cookie_special;
39738 +
39739 +atomic_unchecked_t fscache_n_object_alloc;
39740 +atomic_unchecked_t fscache_n_object_no_alloc;
39741 +atomic_unchecked_t fscache_n_object_lookups;
39742 +atomic_unchecked_t fscache_n_object_lookups_negative;
39743 +atomic_unchecked_t fscache_n_object_lookups_positive;
39744 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
39745 +atomic_unchecked_t fscache_n_object_created;
39746 +atomic_unchecked_t fscache_n_object_avail;
39747 +atomic_unchecked_t fscache_n_object_dead;
39748 +
39749 +atomic_unchecked_t fscache_n_checkaux_none;
39750 +atomic_unchecked_t fscache_n_checkaux_okay;
39751 +atomic_unchecked_t fscache_n_checkaux_update;
39752 +atomic_unchecked_t fscache_n_checkaux_obsolete;
39753
39754 atomic_t fscache_n_cop_alloc_object;
39755 atomic_t fscache_n_cop_lookup_object;
39756 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
39757 seq_puts(m, "FS-Cache statistics\n");
39758
39759 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
39760 - atomic_read(&fscache_n_cookie_index),
39761 - atomic_read(&fscache_n_cookie_data),
39762 - atomic_read(&fscache_n_cookie_special));
39763 + atomic_read_unchecked(&fscache_n_cookie_index),
39764 + atomic_read_unchecked(&fscache_n_cookie_data),
39765 + atomic_read_unchecked(&fscache_n_cookie_special));
39766
39767 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
39768 - atomic_read(&fscache_n_object_alloc),
39769 - atomic_read(&fscache_n_object_no_alloc),
39770 - atomic_read(&fscache_n_object_avail),
39771 - atomic_read(&fscache_n_object_dead));
39772 + atomic_read_unchecked(&fscache_n_object_alloc),
39773 + atomic_read_unchecked(&fscache_n_object_no_alloc),
39774 + atomic_read_unchecked(&fscache_n_object_avail),
39775 + atomic_read_unchecked(&fscache_n_object_dead));
39776 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
39777 - atomic_read(&fscache_n_checkaux_none),
39778 - atomic_read(&fscache_n_checkaux_okay),
39779 - atomic_read(&fscache_n_checkaux_update),
39780 - atomic_read(&fscache_n_checkaux_obsolete));
39781 + atomic_read_unchecked(&fscache_n_checkaux_none),
39782 + atomic_read_unchecked(&fscache_n_checkaux_okay),
39783 + atomic_read_unchecked(&fscache_n_checkaux_update),
39784 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
39785
39786 seq_printf(m, "Pages : mrk=%u unc=%u\n",
39787 - atomic_read(&fscache_n_marks),
39788 - atomic_read(&fscache_n_uncaches));
39789 + atomic_read_unchecked(&fscache_n_marks),
39790 + atomic_read_unchecked(&fscache_n_uncaches));
39791
39792 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
39793 " oom=%u\n",
39794 - atomic_read(&fscache_n_acquires),
39795 - atomic_read(&fscache_n_acquires_null),
39796 - atomic_read(&fscache_n_acquires_no_cache),
39797 - atomic_read(&fscache_n_acquires_ok),
39798 - atomic_read(&fscache_n_acquires_nobufs),
39799 - atomic_read(&fscache_n_acquires_oom));
39800 + atomic_read_unchecked(&fscache_n_acquires),
39801 + atomic_read_unchecked(&fscache_n_acquires_null),
39802 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
39803 + atomic_read_unchecked(&fscache_n_acquires_ok),
39804 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
39805 + atomic_read_unchecked(&fscache_n_acquires_oom));
39806
39807 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
39808 - atomic_read(&fscache_n_object_lookups),
39809 - atomic_read(&fscache_n_object_lookups_negative),
39810 - atomic_read(&fscache_n_object_lookups_positive),
39811 - atomic_read(&fscache_n_object_lookups_timed_out),
39812 - atomic_read(&fscache_n_object_created));
39813 + atomic_read_unchecked(&fscache_n_object_lookups),
39814 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
39815 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
39816 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
39817 + atomic_read_unchecked(&fscache_n_object_created));
39818
39819 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
39820 - atomic_read(&fscache_n_updates),
39821 - atomic_read(&fscache_n_updates_null),
39822 - atomic_read(&fscache_n_updates_run));
39823 + atomic_read_unchecked(&fscache_n_updates),
39824 + atomic_read_unchecked(&fscache_n_updates_null),
39825 + atomic_read_unchecked(&fscache_n_updates_run));
39826
39827 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
39828 - atomic_read(&fscache_n_relinquishes),
39829 - atomic_read(&fscache_n_relinquishes_null),
39830 - atomic_read(&fscache_n_relinquishes_waitcrt),
39831 - atomic_read(&fscache_n_relinquishes_retire));
39832 + atomic_read_unchecked(&fscache_n_relinquishes),
39833 + atomic_read_unchecked(&fscache_n_relinquishes_null),
39834 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
39835 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
39836
39837 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
39838 - atomic_read(&fscache_n_attr_changed),
39839 - atomic_read(&fscache_n_attr_changed_ok),
39840 - atomic_read(&fscache_n_attr_changed_nobufs),
39841 - atomic_read(&fscache_n_attr_changed_nomem),
39842 - atomic_read(&fscache_n_attr_changed_calls));
39843 + atomic_read_unchecked(&fscache_n_attr_changed),
39844 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
39845 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
39846 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
39847 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
39848
39849 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
39850 - atomic_read(&fscache_n_allocs),
39851 - atomic_read(&fscache_n_allocs_ok),
39852 - atomic_read(&fscache_n_allocs_wait),
39853 - atomic_read(&fscache_n_allocs_nobufs),
39854 - atomic_read(&fscache_n_allocs_intr));
39855 + atomic_read_unchecked(&fscache_n_allocs),
39856 + atomic_read_unchecked(&fscache_n_allocs_ok),
39857 + atomic_read_unchecked(&fscache_n_allocs_wait),
39858 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
39859 + atomic_read_unchecked(&fscache_n_allocs_intr));
39860 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
39861 - atomic_read(&fscache_n_alloc_ops),
39862 - atomic_read(&fscache_n_alloc_op_waits),
39863 - atomic_read(&fscache_n_allocs_object_dead));
39864 + atomic_read_unchecked(&fscache_n_alloc_ops),
39865 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
39866 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
39867
39868 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
39869 " int=%u oom=%u\n",
39870 - atomic_read(&fscache_n_retrievals),
39871 - atomic_read(&fscache_n_retrievals_ok),
39872 - atomic_read(&fscache_n_retrievals_wait),
39873 - atomic_read(&fscache_n_retrievals_nodata),
39874 - atomic_read(&fscache_n_retrievals_nobufs),
39875 - atomic_read(&fscache_n_retrievals_intr),
39876 - atomic_read(&fscache_n_retrievals_nomem));
39877 + atomic_read_unchecked(&fscache_n_retrievals),
39878 + atomic_read_unchecked(&fscache_n_retrievals_ok),
39879 + atomic_read_unchecked(&fscache_n_retrievals_wait),
39880 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
39881 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
39882 + atomic_read_unchecked(&fscache_n_retrievals_intr),
39883 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
39884 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
39885 - atomic_read(&fscache_n_retrieval_ops),
39886 - atomic_read(&fscache_n_retrieval_op_waits),
39887 - atomic_read(&fscache_n_retrievals_object_dead));
39888 + atomic_read_unchecked(&fscache_n_retrieval_ops),
39889 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
39890 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
39891
39892 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
39893 - atomic_read(&fscache_n_stores),
39894 - atomic_read(&fscache_n_stores_ok),
39895 - atomic_read(&fscache_n_stores_again),
39896 - atomic_read(&fscache_n_stores_nobufs),
39897 - atomic_read(&fscache_n_stores_oom));
39898 + atomic_read_unchecked(&fscache_n_stores),
39899 + atomic_read_unchecked(&fscache_n_stores_ok),
39900 + atomic_read_unchecked(&fscache_n_stores_again),
39901 + atomic_read_unchecked(&fscache_n_stores_nobufs),
39902 + atomic_read_unchecked(&fscache_n_stores_oom));
39903 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
39904 - atomic_read(&fscache_n_store_ops),
39905 - atomic_read(&fscache_n_store_calls),
39906 - atomic_read(&fscache_n_store_pages),
39907 - atomic_read(&fscache_n_store_radix_deletes),
39908 - atomic_read(&fscache_n_store_pages_over_limit));
39909 + atomic_read_unchecked(&fscache_n_store_ops),
39910 + atomic_read_unchecked(&fscache_n_store_calls),
39911 + atomic_read_unchecked(&fscache_n_store_pages),
39912 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
39913 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
39914
39915 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
39916 - atomic_read(&fscache_n_store_vmscan_not_storing),
39917 - atomic_read(&fscache_n_store_vmscan_gone),
39918 - atomic_read(&fscache_n_store_vmscan_busy),
39919 - atomic_read(&fscache_n_store_vmscan_cancelled));
39920 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
39921 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
39922 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
39923 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
39924
39925 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
39926 - atomic_read(&fscache_n_op_pend),
39927 - atomic_read(&fscache_n_op_run),
39928 - atomic_read(&fscache_n_op_enqueue),
39929 - atomic_read(&fscache_n_op_cancelled),
39930 - atomic_read(&fscache_n_op_rejected));
39931 + atomic_read_unchecked(&fscache_n_op_pend),
39932 + atomic_read_unchecked(&fscache_n_op_run),
39933 + atomic_read_unchecked(&fscache_n_op_enqueue),
39934 + atomic_read_unchecked(&fscache_n_op_cancelled),
39935 + atomic_read_unchecked(&fscache_n_op_rejected));
39936 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
39937 - atomic_read(&fscache_n_op_deferred_release),
39938 - atomic_read(&fscache_n_op_release),
39939 - atomic_read(&fscache_n_op_gc));
39940 + atomic_read_unchecked(&fscache_n_op_deferred_release),
39941 + atomic_read_unchecked(&fscache_n_op_release),
39942 + atomic_read_unchecked(&fscache_n_op_gc));
39943
39944 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
39945 atomic_read(&fscache_n_cop_alloc_object),
39946 diff -urNp linux-2.6.32.42/fs/fs_struct.c linux-2.6.32.42/fs/fs_struct.c
39947 --- linux-2.6.32.42/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
39948 +++ linux-2.6.32.42/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
39949 @@ -4,6 +4,7 @@
39950 #include <linux/path.h>
39951 #include <linux/slab.h>
39952 #include <linux/fs_struct.h>
39953 +#include <linux/grsecurity.h>
39954
39955 /*
39956 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
39957 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
39958 old_root = fs->root;
39959 fs->root = *path;
39960 path_get(path);
39961 + gr_set_chroot_entries(current, path);
39962 write_unlock(&fs->lock);
39963 if (old_root.dentry)
39964 path_put(&old_root);
39965 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
39966 && fs->root.mnt == old_root->mnt) {
39967 path_get(new_root);
39968 fs->root = *new_root;
39969 + gr_set_chroot_entries(p, new_root);
39970 count++;
39971 }
39972 if (fs->pwd.dentry == old_root->dentry
39973 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
39974 task_lock(tsk);
39975 write_lock(&fs->lock);
39976 tsk->fs = NULL;
39977 - kill = !--fs->users;
39978 + gr_clear_chroot_entries(tsk);
39979 + kill = !atomic_dec_return(&fs->users);
39980 write_unlock(&fs->lock);
39981 task_unlock(tsk);
39982 if (kill)
39983 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
39984 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
39985 /* We don't need to lock fs - think why ;-) */
39986 if (fs) {
39987 - fs->users = 1;
39988 + atomic_set(&fs->users, 1);
39989 fs->in_exec = 0;
39990 rwlock_init(&fs->lock);
39991 fs->umask = old->umask;
39992 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
39993
39994 task_lock(current);
39995 write_lock(&fs->lock);
39996 - kill = !--fs->users;
39997 + kill = !atomic_dec_return(&fs->users);
39998 current->fs = new_fs;
39999 + gr_set_chroot_entries(current, &new_fs->root);
40000 write_unlock(&fs->lock);
40001 task_unlock(current);
40002
40003 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
40004
40005 /* to be mentioned only in INIT_TASK */
40006 struct fs_struct init_fs = {
40007 - .users = 1,
40008 + .users = ATOMIC_INIT(1),
40009 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
40010 .umask = 0022,
40011 };
40012 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
40013 task_lock(current);
40014
40015 write_lock(&init_fs.lock);
40016 - init_fs.users++;
40017 + atomic_inc(&init_fs.users);
40018 write_unlock(&init_fs.lock);
40019
40020 write_lock(&fs->lock);
40021 current->fs = &init_fs;
40022 - kill = !--fs->users;
40023 + gr_set_chroot_entries(current, &current->fs->root);
40024 + kill = !atomic_dec_return(&fs->users);
40025 write_unlock(&fs->lock);
40026
40027 task_unlock(current);
40028 diff -urNp linux-2.6.32.42/fs/fuse/cuse.c linux-2.6.32.42/fs/fuse/cuse.c
40029 --- linux-2.6.32.42/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
40030 +++ linux-2.6.32.42/fs/fuse/cuse.c 2011-04-17 15:56:46.000000000 -0400
40031 @@ -528,8 +528,18 @@ static int cuse_channel_release(struct i
40032 return rc;
40033 }
40034
40035 -static struct file_operations cuse_channel_fops; /* initialized during init */
40036 -
40037 +static const struct file_operations cuse_channel_fops = { /* initialized during init */
40038 + .owner = THIS_MODULE,
40039 + .llseek = no_llseek,
40040 + .read = do_sync_read,
40041 + .aio_read = fuse_dev_read,
40042 + .write = do_sync_write,
40043 + .aio_write = fuse_dev_write,
40044 + .poll = fuse_dev_poll,
40045 + .open = cuse_channel_open,
40046 + .release = cuse_channel_release,
40047 + .fasync = fuse_dev_fasync,
40048 +};
40049
40050 /**************************************************************************
40051 * Misc stuff and module initializatiion
40052 @@ -575,12 +585,6 @@ static int __init cuse_init(void)
40053 for (i = 0; i < CUSE_CONNTBL_LEN; i++)
40054 INIT_LIST_HEAD(&cuse_conntbl[i]);
40055
40056 - /* inherit and extend fuse_dev_operations */
40057 - cuse_channel_fops = fuse_dev_operations;
40058 - cuse_channel_fops.owner = THIS_MODULE;
40059 - cuse_channel_fops.open = cuse_channel_open;
40060 - cuse_channel_fops.release = cuse_channel_release;
40061 -
40062 cuse_class = class_create(THIS_MODULE, "cuse");
40063 if (IS_ERR(cuse_class))
40064 return PTR_ERR(cuse_class);
40065 diff -urNp linux-2.6.32.42/fs/fuse/dev.c linux-2.6.32.42/fs/fuse/dev.c
40066 --- linux-2.6.32.42/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
40067 +++ linux-2.6.32.42/fs/fuse/dev.c 2011-04-17 15:56:46.000000000 -0400
40068 @@ -745,7 +745,7 @@ __releases(&fc->lock)
40069 * request_end(). Otherwise add it to the processing list, and set
40070 * the 'sent' flag.
40071 */
40072 -static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40073 +ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40074 unsigned long nr_segs, loff_t pos)
40075 {
40076 int err;
40077 @@ -827,6 +827,7 @@ static ssize_t fuse_dev_read(struct kioc
40078 spin_unlock(&fc->lock);
40079 return err;
40080 }
40081 +EXPORT_SYMBOL_GPL(fuse_dev_read);
40082
40083 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
40084 struct fuse_copy_state *cs)
40085 @@ -885,7 +886,7 @@ static int fuse_notify_inval_entry(struc
40086 {
40087 struct fuse_notify_inval_entry_out outarg;
40088 int err = -EINVAL;
40089 - char buf[FUSE_NAME_MAX+1];
40090 + char *buf = NULL;
40091 struct qstr name;
40092
40093 if (size < sizeof(outarg))
40094 @@ -899,6 +900,11 @@ static int fuse_notify_inval_entry(struc
40095 if (outarg.namelen > FUSE_NAME_MAX)
40096 goto err;
40097
40098 + err = -ENOMEM;
40099 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
40100 + if (!buf)
40101 + goto err;
40102 +
40103 name.name = buf;
40104 name.len = outarg.namelen;
40105 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
40106 @@ -910,17 +916,15 @@ static int fuse_notify_inval_entry(struc
40107
40108 down_read(&fc->killsb);
40109 err = -ENOENT;
40110 - if (!fc->sb)
40111 - goto err_unlock;
40112 -
40113 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
40114 -
40115 -err_unlock:
40116 + if (fc->sb)
40117 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
40118 up_read(&fc->killsb);
40119 + kfree(buf);
40120 return err;
40121
40122 err:
40123 fuse_copy_finish(cs);
40124 + kfree(buf);
40125 return err;
40126 }
40127
40128 @@ -987,7 +991,7 @@ static int copy_out_args(struct fuse_cop
40129 * it from the list and copy the rest of the buffer to the request.
40130 * The request is finished by calling request_end()
40131 */
40132 -static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40133 +ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40134 unsigned long nr_segs, loff_t pos)
40135 {
40136 int err;
40137 @@ -1083,8 +1087,9 @@ static ssize_t fuse_dev_write(struct kio
40138 fuse_copy_finish(&cs);
40139 return err;
40140 }
40141 +EXPORT_SYMBOL_GPL(fuse_dev_write);
40142
40143 -static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40144 +unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40145 {
40146 unsigned mask = POLLOUT | POLLWRNORM;
40147 struct fuse_conn *fc = fuse_get_conn(file);
40148 @@ -1102,6 +1107,7 @@ static unsigned fuse_dev_poll(struct fil
40149
40150 return mask;
40151 }
40152 +EXPORT_SYMBOL_GPL(fuse_dev_poll);
40153
40154 /*
40155 * Abort all requests on the given list (pending or processing)
40156 @@ -1218,7 +1224,7 @@ int fuse_dev_release(struct inode *inode
40157 }
40158 EXPORT_SYMBOL_GPL(fuse_dev_release);
40159
40160 -static int fuse_dev_fasync(int fd, struct file *file, int on)
40161 +int fuse_dev_fasync(int fd, struct file *file, int on)
40162 {
40163 struct fuse_conn *fc = fuse_get_conn(file);
40164 if (!fc)
40165 @@ -1227,6 +1233,7 @@ static int fuse_dev_fasync(int fd, struc
40166 /* No locking - fasync_helper does its own locking */
40167 return fasync_helper(fd, file, on, &fc->fasync);
40168 }
40169 +EXPORT_SYMBOL_GPL(fuse_dev_fasync);
40170
40171 const struct file_operations fuse_dev_operations = {
40172 .owner = THIS_MODULE,
40173 diff -urNp linux-2.6.32.42/fs/fuse/dir.c linux-2.6.32.42/fs/fuse/dir.c
40174 --- linux-2.6.32.42/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
40175 +++ linux-2.6.32.42/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
40176 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
40177 return link;
40178 }
40179
40180 -static void free_link(char *link)
40181 +static void free_link(const char *link)
40182 {
40183 if (!IS_ERR(link))
40184 free_page((unsigned long) link);
40185 diff -urNp linux-2.6.32.42/fs/fuse/fuse_i.h linux-2.6.32.42/fs/fuse/fuse_i.h
40186 --- linux-2.6.32.42/fs/fuse/fuse_i.h 2011-03-27 14:31:47.000000000 -0400
40187 +++ linux-2.6.32.42/fs/fuse/fuse_i.h 2011-04-17 15:56:46.000000000 -0400
40188 @@ -525,6 +525,16 @@ extern const struct file_operations fuse
40189
40190 extern const struct dentry_operations fuse_dentry_operations;
40191
40192 +extern ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40193 + unsigned long nr_segs, loff_t pos);
40194 +
40195 +extern ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40196 + unsigned long nr_segs, loff_t pos);
40197 +
40198 +extern unsigned fuse_dev_poll(struct file *file, poll_table *wait);
40199 +
40200 +extern int fuse_dev_fasync(int fd, struct file *file, int on);
40201 +
40202 /**
40203 * Inode to nodeid comparison.
40204 */
40205 diff -urNp linux-2.6.32.42/fs/gfs2/ops_inode.c linux-2.6.32.42/fs/gfs2/ops_inode.c
40206 --- linux-2.6.32.42/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
40207 +++ linux-2.6.32.42/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
40208 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
40209 unsigned int x;
40210 int error;
40211
40212 + pax_track_stack();
40213 +
40214 if (ndentry->d_inode) {
40215 nip = GFS2_I(ndentry->d_inode);
40216 if (ip == nip)
40217 diff -urNp linux-2.6.32.42/fs/gfs2/sys.c linux-2.6.32.42/fs/gfs2/sys.c
40218 --- linux-2.6.32.42/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
40219 +++ linux-2.6.32.42/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
40220 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
40221 return a->store ? a->store(sdp, buf, len) : len;
40222 }
40223
40224 -static struct sysfs_ops gfs2_attr_ops = {
40225 +static const struct sysfs_ops gfs2_attr_ops = {
40226 .show = gfs2_attr_show,
40227 .store = gfs2_attr_store,
40228 };
40229 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
40230 return 0;
40231 }
40232
40233 -static struct kset_uevent_ops gfs2_uevent_ops = {
40234 +static const struct kset_uevent_ops gfs2_uevent_ops = {
40235 .uevent = gfs2_uevent,
40236 };
40237
40238 diff -urNp linux-2.6.32.42/fs/hfsplus/catalog.c linux-2.6.32.42/fs/hfsplus/catalog.c
40239 --- linux-2.6.32.42/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
40240 +++ linux-2.6.32.42/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
40241 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
40242 int err;
40243 u16 type;
40244
40245 + pax_track_stack();
40246 +
40247 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
40248 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
40249 if (err)
40250 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
40251 int entry_size;
40252 int err;
40253
40254 + pax_track_stack();
40255 +
40256 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
40257 sb = dir->i_sb;
40258 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
40259 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
40260 int entry_size, type;
40261 int err = 0;
40262
40263 + pax_track_stack();
40264 +
40265 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
40266 dst_dir->i_ino, dst_name->name);
40267 sb = src_dir->i_sb;
40268 diff -urNp linux-2.6.32.42/fs/hfsplus/dir.c linux-2.6.32.42/fs/hfsplus/dir.c
40269 --- linux-2.6.32.42/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
40270 +++ linux-2.6.32.42/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
40271 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
40272 struct hfsplus_readdir_data *rd;
40273 u16 type;
40274
40275 + pax_track_stack();
40276 +
40277 if (filp->f_pos >= inode->i_size)
40278 return 0;
40279
40280 diff -urNp linux-2.6.32.42/fs/hfsplus/inode.c linux-2.6.32.42/fs/hfsplus/inode.c
40281 --- linux-2.6.32.42/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
40282 +++ linux-2.6.32.42/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
40283 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
40284 int res = 0;
40285 u16 type;
40286
40287 + pax_track_stack();
40288 +
40289 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
40290
40291 HFSPLUS_I(inode).dev = 0;
40292 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
40293 struct hfs_find_data fd;
40294 hfsplus_cat_entry entry;
40295
40296 + pax_track_stack();
40297 +
40298 if (HFSPLUS_IS_RSRC(inode))
40299 main_inode = HFSPLUS_I(inode).rsrc_inode;
40300
40301 diff -urNp linux-2.6.32.42/fs/hfsplus/ioctl.c linux-2.6.32.42/fs/hfsplus/ioctl.c
40302 --- linux-2.6.32.42/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40303 +++ linux-2.6.32.42/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
40304 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
40305 struct hfsplus_cat_file *file;
40306 int res;
40307
40308 + pax_track_stack();
40309 +
40310 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40311 return -EOPNOTSUPP;
40312
40313 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
40314 struct hfsplus_cat_file *file;
40315 ssize_t res = 0;
40316
40317 + pax_track_stack();
40318 +
40319 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40320 return -EOPNOTSUPP;
40321
40322 diff -urNp linux-2.6.32.42/fs/hfsplus/super.c linux-2.6.32.42/fs/hfsplus/super.c
40323 --- linux-2.6.32.42/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
40324 +++ linux-2.6.32.42/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
40325 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
40326 struct nls_table *nls = NULL;
40327 int err = -EINVAL;
40328
40329 + pax_track_stack();
40330 +
40331 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
40332 if (!sbi)
40333 return -ENOMEM;
40334 diff -urNp linux-2.6.32.42/fs/hugetlbfs/inode.c linux-2.6.32.42/fs/hugetlbfs/inode.c
40335 --- linux-2.6.32.42/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40336 +++ linux-2.6.32.42/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40337 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
40338 .kill_sb = kill_litter_super,
40339 };
40340
40341 -static struct vfsmount *hugetlbfs_vfsmount;
40342 +struct vfsmount *hugetlbfs_vfsmount;
40343
40344 static int can_do_hugetlb_shm(void)
40345 {
40346 diff -urNp linux-2.6.32.42/fs/ioctl.c linux-2.6.32.42/fs/ioctl.c
40347 --- linux-2.6.32.42/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40348 +++ linux-2.6.32.42/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
40349 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
40350 u64 phys, u64 len, u32 flags)
40351 {
40352 struct fiemap_extent extent;
40353 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
40354 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
40355
40356 /* only count the extents */
40357 if (fieinfo->fi_extents_max == 0) {
40358 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
40359
40360 fieinfo.fi_flags = fiemap.fm_flags;
40361 fieinfo.fi_extents_max = fiemap.fm_extent_count;
40362 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
40363 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
40364
40365 if (fiemap.fm_extent_count != 0 &&
40366 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
40367 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
40368 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
40369 fiemap.fm_flags = fieinfo.fi_flags;
40370 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
40371 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
40372 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
40373 error = -EFAULT;
40374
40375 return error;
40376 diff -urNp linux-2.6.32.42/fs/jbd/checkpoint.c linux-2.6.32.42/fs/jbd/checkpoint.c
40377 --- linux-2.6.32.42/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
40378 +++ linux-2.6.32.42/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
40379 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
40380 tid_t this_tid;
40381 int result;
40382
40383 + pax_track_stack();
40384 +
40385 jbd_debug(1, "Start checkpoint\n");
40386
40387 /*
40388 diff -urNp linux-2.6.32.42/fs/jffs2/compr_rtime.c linux-2.6.32.42/fs/jffs2/compr_rtime.c
40389 --- linux-2.6.32.42/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
40390 +++ linux-2.6.32.42/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
40391 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
40392 int outpos = 0;
40393 int pos=0;
40394
40395 + pax_track_stack();
40396 +
40397 memset(positions,0,sizeof(positions));
40398
40399 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
40400 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
40401 int outpos = 0;
40402 int pos=0;
40403
40404 + pax_track_stack();
40405 +
40406 memset(positions,0,sizeof(positions));
40407
40408 while (outpos<destlen) {
40409 diff -urNp linux-2.6.32.42/fs/jffs2/compr_rubin.c linux-2.6.32.42/fs/jffs2/compr_rubin.c
40410 --- linux-2.6.32.42/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
40411 +++ linux-2.6.32.42/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
40412 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
40413 int ret;
40414 uint32_t mysrclen, mydstlen;
40415
40416 + pax_track_stack();
40417 +
40418 mysrclen = *sourcelen;
40419 mydstlen = *dstlen - 8;
40420
40421 diff -urNp linux-2.6.32.42/fs/jffs2/erase.c linux-2.6.32.42/fs/jffs2/erase.c
40422 --- linux-2.6.32.42/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
40423 +++ linux-2.6.32.42/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
40424 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
40425 struct jffs2_unknown_node marker = {
40426 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
40427 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40428 - .totlen = cpu_to_je32(c->cleanmarker_size)
40429 + .totlen = cpu_to_je32(c->cleanmarker_size),
40430 + .hdr_crc = cpu_to_je32(0)
40431 };
40432
40433 jffs2_prealloc_raw_node_refs(c, jeb, 1);
40434 diff -urNp linux-2.6.32.42/fs/jffs2/wbuf.c linux-2.6.32.42/fs/jffs2/wbuf.c
40435 --- linux-2.6.32.42/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
40436 +++ linux-2.6.32.42/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
40437 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
40438 {
40439 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
40440 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40441 - .totlen = constant_cpu_to_je32(8)
40442 + .totlen = constant_cpu_to_je32(8),
40443 + .hdr_crc = constant_cpu_to_je32(0)
40444 };
40445
40446 /*
40447 diff -urNp linux-2.6.32.42/fs/jffs2/xattr.c linux-2.6.32.42/fs/jffs2/xattr.c
40448 --- linux-2.6.32.42/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
40449 +++ linux-2.6.32.42/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
40450 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
40451
40452 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
40453
40454 + pax_track_stack();
40455 +
40456 /* Phase.1 : Merge same xref */
40457 for (i=0; i < XREF_TMPHASH_SIZE; i++)
40458 xref_tmphash[i] = NULL;
40459 diff -urNp linux-2.6.32.42/fs/jfs/super.c linux-2.6.32.42/fs/jfs/super.c
40460 --- linux-2.6.32.42/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
40461 +++ linux-2.6.32.42/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
40462 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
40463
40464 jfs_inode_cachep =
40465 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
40466 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
40467 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
40468 init_once);
40469 if (jfs_inode_cachep == NULL)
40470 return -ENOMEM;
40471 diff -urNp linux-2.6.32.42/fs/Kconfig.binfmt linux-2.6.32.42/fs/Kconfig.binfmt
40472 --- linux-2.6.32.42/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
40473 +++ linux-2.6.32.42/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
40474 @@ -86,7 +86,7 @@ config HAVE_AOUT
40475
40476 config BINFMT_AOUT
40477 tristate "Kernel support for a.out and ECOFF binaries"
40478 - depends on HAVE_AOUT
40479 + depends on HAVE_AOUT && BROKEN
40480 ---help---
40481 A.out (Assembler.OUTput) is a set of formats for libraries and
40482 executables used in the earliest versions of UNIX. Linux used
40483 diff -urNp linux-2.6.32.42/fs/libfs.c linux-2.6.32.42/fs/libfs.c
40484 --- linux-2.6.32.42/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
40485 +++ linux-2.6.32.42/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
40486 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
40487
40488 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
40489 struct dentry *next;
40490 + char d_name[sizeof(next->d_iname)];
40491 + const unsigned char *name;
40492 +
40493 next = list_entry(p, struct dentry, d_u.d_child);
40494 if (d_unhashed(next) || !next->d_inode)
40495 continue;
40496
40497 spin_unlock(&dcache_lock);
40498 - if (filldir(dirent, next->d_name.name,
40499 + name = next->d_name.name;
40500 + if (name == next->d_iname) {
40501 + memcpy(d_name, name, next->d_name.len);
40502 + name = d_name;
40503 + }
40504 + if (filldir(dirent, name,
40505 next->d_name.len, filp->f_pos,
40506 next->d_inode->i_ino,
40507 dt_type(next->d_inode)) < 0)
40508 diff -urNp linux-2.6.32.42/fs/lockd/clntproc.c linux-2.6.32.42/fs/lockd/clntproc.c
40509 --- linux-2.6.32.42/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
40510 +++ linux-2.6.32.42/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
40511 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
40512 /*
40513 * Cookie counter for NLM requests
40514 */
40515 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
40516 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
40517
40518 void nlmclnt_next_cookie(struct nlm_cookie *c)
40519 {
40520 - u32 cookie = atomic_inc_return(&nlm_cookie);
40521 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
40522
40523 memcpy(c->data, &cookie, 4);
40524 c->len=4;
40525 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
40526 struct nlm_rqst reqst, *req;
40527 int status;
40528
40529 + pax_track_stack();
40530 +
40531 req = &reqst;
40532 memset(req, 0, sizeof(*req));
40533 locks_init_lock(&req->a_args.lock.fl);
40534 diff -urNp linux-2.6.32.42/fs/lockd/svc.c linux-2.6.32.42/fs/lockd/svc.c
40535 --- linux-2.6.32.42/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
40536 +++ linux-2.6.32.42/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
40537 @@ -43,7 +43,7 @@
40538
40539 static struct svc_program nlmsvc_program;
40540
40541 -struct nlmsvc_binding * nlmsvc_ops;
40542 +const struct nlmsvc_binding * nlmsvc_ops;
40543 EXPORT_SYMBOL_GPL(nlmsvc_ops);
40544
40545 static DEFINE_MUTEX(nlmsvc_mutex);
40546 diff -urNp linux-2.6.32.42/fs/locks.c linux-2.6.32.42/fs/locks.c
40547 --- linux-2.6.32.42/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
40548 +++ linux-2.6.32.42/fs/locks.c 2011-04-17 15:56:46.000000000 -0400
40549 @@ -2007,16 +2007,16 @@ void locks_remove_flock(struct file *fil
40550 return;
40551
40552 if (filp->f_op && filp->f_op->flock) {
40553 - struct file_lock fl = {
40554 + struct file_lock flock = {
40555 .fl_pid = current->tgid,
40556 .fl_file = filp,
40557 .fl_flags = FL_FLOCK,
40558 .fl_type = F_UNLCK,
40559 .fl_end = OFFSET_MAX,
40560 };
40561 - filp->f_op->flock(filp, F_SETLKW, &fl);
40562 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
40563 - fl.fl_ops->fl_release_private(&fl);
40564 + filp->f_op->flock(filp, F_SETLKW, &flock);
40565 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
40566 + flock.fl_ops->fl_release_private(&flock);
40567 }
40568
40569 lock_kernel();
40570 diff -urNp linux-2.6.32.42/fs/namei.c linux-2.6.32.42/fs/namei.c
40571 --- linux-2.6.32.42/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
40572 +++ linux-2.6.32.42/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
40573 @@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
40574 return ret;
40575
40576 /*
40577 - * Read/write DACs are always overridable.
40578 - * Executable DACs are overridable if at least one exec bit is set.
40579 - */
40580 - if (!(mask & MAY_EXEC) || execute_ok(inode))
40581 - if (capable(CAP_DAC_OVERRIDE))
40582 - return 0;
40583 -
40584 - /*
40585 * Searching includes executable on directories, else just read.
40586 */
40587 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
40588 @@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
40589 if (capable(CAP_DAC_READ_SEARCH))
40590 return 0;
40591
40592 + /*
40593 + * Read/write DACs are always overridable.
40594 + * Executable DACs are overridable if at least one exec bit is set.
40595 + */
40596 + if (!(mask & MAY_EXEC) || execute_ok(inode))
40597 + if (capable(CAP_DAC_OVERRIDE))
40598 + return 0;
40599 +
40600 return -EACCES;
40601 }
40602
40603 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
40604 if (!ret)
40605 goto ok;
40606
40607 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
40608 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
40609 + capable(CAP_DAC_OVERRIDE))
40610 goto ok;
40611
40612 return ret;
40613 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
40614 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
40615 error = PTR_ERR(cookie);
40616 if (!IS_ERR(cookie)) {
40617 - char *s = nd_get_link(nd);
40618 + const char *s = nd_get_link(nd);
40619 error = 0;
40620 if (s)
40621 error = __vfs_follow_link(nd, s);
40622 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct
40623 err = security_inode_follow_link(path->dentry, nd);
40624 if (err)
40625 goto loop;
40626 +
40627 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
40628 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
40629 + err = -EACCES;
40630 + goto loop;
40631 + }
40632 +
40633 current->link_count++;
40634 current->total_link_count++;
40635 nd->depth++;
40636 @@ -1016,11 +1024,18 @@ return_reval:
40637 break;
40638 }
40639 return_base:
40640 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
40641 + path_put(&nd->path);
40642 + return -ENOENT;
40643 + }
40644 return 0;
40645 out_dput:
40646 path_put_conditional(&next, nd);
40647 break;
40648 }
40649 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
40650 + err = -ENOENT;
40651 +
40652 path_put(&nd->path);
40653 return_err:
40654 return err;
40655 @@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
40656 int retval = path_init(dfd, name, flags, nd);
40657 if (!retval)
40658 retval = path_walk(name, nd);
40659 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
40660 - nd->path.dentry->d_inode))
40661 - audit_inode(name, nd->path.dentry);
40662 +
40663 + if (likely(!retval)) {
40664 + if (nd->path.dentry && nd->path.dentry->d_inode) {
40665 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
40666 + retval = -ENOENT;
40667 + if (!audit_dummy_context())
40668 + audit_inode(name, nd->path.dentry);
40669 + }
40670 + }
40671 if (nd->root.mnt) {
40672 path_put(&nd->root);
40673 nd->root.mnt = NULL;
40674 }
40675 +
40676 return retval;
40677 }
40678
40679 @@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
40680 if (error)
40681 goto err_out;
40682
40683 +
40684 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
40685 + error = -EPERM;
40686 + goto err_out;
40687 + }
40688 + if (gr_handle_rawio(inode)) {
40689 + error = -EPERM;
40690 + goto err_out;
40691 + }
40692 + if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
40693 + error = -EACCES;
40694 + goto err_out;
40695 + }
40696 +
40697 if (flag & O_TRUNC) {
40698 error = get_write_access(inode);
40699 if (error)
40700 @@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
40701 int error;
40702 struct dentry *dir = nd->path.dentry;
40703
40704 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
40705 + error = -EACCES;
40706 + goto out_unlock;
40707 + }
40708 +
40709 if (!IS_POSIXACL(dir->d_inode))
40710 mode &= ~current_umask();
40711 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
40712 if (error)
40713 goto out_unlock;
40714 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
40715 + if (!error)
40716 + gr_handle_create(path->dentry, nd->path.mnt);
40717 out_unlock:
40718 mutex_unlock(&dir->d_inode->i_mutex);
40719 dput(nd->path.dentry);
40720 @@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
40721 &nd, flag);
40722 if (error)
40723 return ERR_PTR(error);
40724 +
40725 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
40726 + error = -EPERM;
40727 + goto exit;
40728 + }
40729 +
40730 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
40731 + error = -EPERM;
40732 + goto exit;
40733 + }
40734 +
40735 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
40736 + error = -EACCES;
40737 + goto exit;
40738 + }
40739 +
40740 goto ok;
40741 }
40742
40743 @@ -1795,6 +1854,14 @@ do_last:
40744 /*
40745 * It already exists.
40746 */
40747 +
40748 + /* only check if O_CREAT is specified, all other checks need
40749 + to go into may_open */
40750 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
40751 + error = -EACCES;
40752 + goto exit_mutex_unlock;
40753 + }
40754 +
40755 mutex_unlock(&dir->d_inode->i_mutex);
40756 audit_inode(pathname, path.dentry);
40757
40758 @@ -1887,6 +1954,13 @@ do_link:
40759 error = security_inode_follow_link(path.dentry, &nd);
40760 if (error)
40761 goto exit_dput;
40762 +
40763 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
40764 + path.dentry, nd.path.mnt)) {
40765 + error = -EACCES;
40766 + goto exit_dput;
40767 + }
40768 +
40769 error = __do_follow_link(&path, &nd);
40770 if (error) {
40771 /* Does someone understand code flow here? Or it is only
40772 @@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
40773 error = may_mknod(mode);
40774 if (error)
40775 goto out_dput;
40776 +
40777 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
40778 + error = -EPERM;
40779 + goto out_dput;
40780 + }
40781 +
40782 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
40783 + error = -EACCES;
40784 + goto out_dput;
40785 + }
40786 +
40787 error = mnt_want_write(nd.path.mnt);
40788 if (error)
40789 goto out_dput;
40790 @@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
40791 }
40792 out_drop_write:
40793 mnt_drop_write(nd.path.mnt);
40794 +
40795 + if (!error)
40796 + gr_handle_create(dentry, nd.path.mnt);
40797 out_dput:
40798 dput(dentry);
40799 out_unlock:
40800 @@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
40801 if (IS_ERR(dentry))
40802 goto out_unlock;
40803
40804 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
40805 + error = -EACCES;
40806 + goto out_dput;
40807 + }
40808 +
40809 if (!IS_POSIXACL(nd.path.dentry->d_inode))
40810 mode &= ~current_umask();
40811 error = mnt_want_write(nd.path.mnt);
40812 @@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
40813 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
40814 out_drop_write:
40815 mnt_drop_write(nd.path.mnt);
40816 +
40817 + if (!error)
40818 + gr_handle_create(dentry, nd.path.mnt);
40819 +
40820 out_dput:
40821 dput(dentry);
40822 out_unlock:
40823 @@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
40824 char * name;
40825 struct dentry *dentry;
40826 struct nameidata nd;
40827 + ino_t saved_ino = 0;
40828 + dev_t saved_dev = 0;
40829
40830 error = user_path_parent(dfd, pathname, &nd, &name);
40831 if (error)
40832 @@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
40833 error = PTR_ERR(dentry);
40834 if (IS_ERR(dentry))
40835 goto exit2;
40836 +
40837 + if (dentry->d_inode != NULL) {
40838 + if (dentry->d_inode->i_nlink <= 1) {
40839 + saved_ino = dentry->d_inode->i_ino;
40840 + saved_dev = gr_get_dev_from_dentry(dentry);
40841 + }
40842 +
40843 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
40844 + error = -EACCES;
40845 + goto exit3;
40846 + }
40847 + }
40848 +
40849 error = mnt_want_write(nd.path.mnt);
40850 if (error)
40851 goto exit3;
40852 @@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
40853 if (error)
40854 goto exit4;
40855 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
40856 + if (!error && (saved_dev || saved_ino))
40857 + gr_handle_delete(saved_ino, saved_dev);
40858 exit4:
40859 mnt_drop_write(nd.path.mnt);
40860 exit3:
40861 @@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
40862 struct dentry *dentry;
40863 struct nameidata nd;
40864 struct inode *inode = NULL;
40865 + ino_t saved_ino = 0;
40866 + dev_t saved_dev = 0;
40867
40868 error = user_path_parent(dfd, pathname, &nd, &name);
40869 if (error)
40870 @@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
40871 if (nd.last.name[nd.last.len])
40872 goto slashes;
40873 inode = dentry->d_inode;
40874 - if (inode)
40875 + if (inode) {
40876 + if (inode->i_nlink <= 1) {
40877 + saved_ino = inode->i_ino;
40878 + saved_dev = gr_get_dev_from_dentry(dentry);
40879 + }
40880 +
40881 atomic_inc(&inode->i_count);
40882 +
40883 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
40884 + error = -EACCES;
40885 + goto exit2;
40886 + }
40887 + }
40888 error = mnt_want_write(nd.path.mnt);
40889 if (error)
40890 goto exit2;
40891 @@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
40892 if (error)
40893 goto exit3;
40894 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
40895 + if (!error && (saved_ino || saved_dev))
40896 + gr_handle_delete(saved_ino, saved_dev);
40897 exit3:
40898 mnt_drop_write(nd.path.mnt);
40899 exit2:
40900 @@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
40901 if (IS_ERR(dentry))
40902 goto out_unlock;
40903
40904 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
40905 + error = -EACCES;
40906 + goto out_dput;
40907 + }
40908 +
40909 error = mnt_want_write(nd.path.mnt);
40910 if (error)
40911 goto out_dput;
40912 @@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
40913 if (error)
40914 goto out_drop_write;
40915 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
40916 + if (!error)
40917 + gr_handle_create(dentry, nd.path.mnt);
40918 out_drop_write:
40919 mnt_drop_write(nd.path.mnt);
40920 out_dput:
40921 @@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
40922 error = PTR_ERR(new_dentry);
40923 if (IS_ERR(new_dentry))
40924 goto out_unlock;
40925 +
40926 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
40927 + old_path.dentry->d_inode,
40928 + old_path.dentry->d_inode->i_mode, to)) {
40929 + error = -EACCES;
40930 + goto out_dput;
40931 + }
40932 +
40933 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
40934 + old_path.dentry, old_path.mnt, to)) {
40935 + error = -EACCES;
40936 + goto out_dput;
40937 + }
40938 +
40939 error = mnt_want_write(nd.path.mnt);
40940 if (error)
40941 goto out_dput;
40942 @@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
40943 if (error)
40944 goto out_drop_write;
40945 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
40946 + if (!error)
40947 + gr_handle_create(new_dentry, nd.path.mnt);
40948 out_drop_write:
40949 mnt_drop_write(nd.path.mnt);
40950 out_dput:
40951 @@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40952 char *to;
40953 int error;
40954
40955 + pax_track_stack();
40956 +
40957 error = user_path_parent(olddfd, oldname, &oldnd, &from);
40958 if (error)
40959 goto exit;
40960 @@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40961 if (new_dentry == trap)
40962 goto exit5;
40963
40964 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
40965 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
40966 + to);
40967 + if (error)
40968 + goto exit5;
40969 +
40970 error = mnt_want_write(oldnd.path.mnt);
40971 if (error)
40972 goto exit5;
40973 @@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40974 goto exit6;
40975 error = vfs_rename(old_dir->d_inode, old_dentry,
40976 new_dir->d_inode, new_dentry);
40977 + if (!error)
40978 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
40979 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
40980 exit6:
40981 mnt_drop_write(oldnd.path.mnt);
40982 exit5:
40983 @@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
40984
40985 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
40986 {
40987 + char tmpbuf[64];
40988 + const char *newlink;
40989 int len;
40990
40991 len = PTR_ERR(link);
40992 @@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
40993 len = strlen(link);
40994 if (len > (unsigned) buflen)
40995 len = buflen;
40996 - if (copy_to_user(buffer, link, len))
40997 +
40998 + if (len < sizeof(tmpbuf)) {
40999 + memcpy(tmpbuf, link, len);
41000 + newlink = tmpbuf;
41001 + } else
41002 + newlink = link;
41003 +
41004 + if (copy_to_user(buffer, newlink, len))
41005 len = -EFAULT;
41006 out:
41007 return len;
41008 diff -urNp linux-2.6.32.42/fs/namespace.c linux-2.6.32.42/fs/namespace.c
41009 --- linux-2.6.32.42/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
41010 +++ linux-2.6.32.42/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
41011 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
41012 if (!(sb->s_flags & MS_RDONLY))
41013 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
41014 up_write(&sb->s_umount);
41015 +
41016 + gr_log_remount(mnt->mnt_devname, retval);
41017 +
41018 return retval;
41019 }
41020
41021 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
41022 security_sb_umount_busy(mnt);
41023 up_write(&namespace_sem);
41024 release_mounts(&umount_list);
41025 +
41026 + gr_log_unmount(mnt->mnt_devname, retval);
41027 +
41028 return retval;
41029 }
41030
41031 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
41032 if (retval)
41033 goto dput_out;
41034
41035 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
41036 + retval = -EPERM;
41037 + goto dput_out;
41038 + }
41039 +
41040 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
41041 + retval = -EPERM;
41042 + goto dput_out;
41043 + }
41044 +
41045 if (flags & MS_REMOUNT)
41046 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
41047 data_page);
41048 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
41049 dev_name, data_page);
41050 dput_out:
41051 path_put(&path);
41052 +
41053 + gr_log_mount(dev_name, dir_name, retval);
41054 +
41055 return retval;
41056 }
41057
41058 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
41059 goto out1;
41060 }
41061
41062 + if (gr_handle_chroot_pivot()) {
41063 + error = -EPERM;
41064 + path_put(&old);
41065 + goto out1;
41066 + }
41067 +
41068 read_lock(&current->fs->lock);
41069 root = current->fs->root;
41070 path_get(&current->fs->root);
41071 diff -urNp linux-2.6.32.42/fs/ncpfs/dir.c linux-2.6.32.42/fs/ncpfs/dir.c
41072 --- linux-2.6.32.42/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41073 +++ linux-2.6.32.42/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
41074 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
41075 int res, val = 0, len;
41076 __u8 __name[NCP_MAXPATHLEN + 1];
41077
41078 + pax_track_stack();
41079 +
41080 parent = dget_parent(dentry);
41081 dir = parent->d_inode;
41082
41083 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
41084 int error, res, len;
41085 __u8 __name[NCP_MAXPATHLEN + 1];
41086
41087 + pax_track_stack();
41088 +
41089 lock_kernel();
41090 error = -EIO;
41091 if (!ncp_conn_valid(server))
41092 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
41093 int error, result, len;
41094 int opmode;
41095 __u8 __name[NCP_MAXPATHLEN + 1];
41096 -
41097 +
41098 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
41099 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
41100
41101 + pax_track_stack();
41102 +
41103 error = -EIO;
41104 lock_kernel();
41105 if (!ncp_conn_valid(server))
41106 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
41107 int error, len;
41108 __u8 __name[NCP_MAXPATHLEN + 1];
41109
41110 + pax_track_stack();
41111 +
41112 DPRINTK("ncp_mkdir: making %s/%s\n",
41113 dentry->d_parent->d_name.name, dentry->d_name.name);
41114
41115 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
41116 if (!ncp_conn_valid(server))
41117 goto out;
41118
41119 + pax_track_stack();
41120 +
41121 ncp_age_dentry(server, dentry);
41122 len = sizeof(__name);
41123 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
41124 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
41125 int old_len, new_len;
41126 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
41127
41128 + pax_track_stack();
41129 +
41130 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
41131 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
41132 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
41133 diff -urNp linux-2.6.32.42/fs/ncpfs/inode.c linux-2.6.32.42/fs/ncpfs/inode.c
41134 --- linux-2.6.32.42/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
41135 +++ linux-2.6.32.42/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
41136 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
41137 #endif
41138 struct ncp_entry_info finfo;
41139
41140 + pax_track_stack();
41141 +
41142 data.wdog_pid = NULL;
41143 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
41144 if (!server)
41145 diff -urNp linux-2.6.32.42/fs/nfs/inode.c linux-2.6.32.42/fs/nfs/inode.c
41146 --- linux-2.6.32.42/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
41147 +++ linux-2.6.32.42/fs/nfs/inode.c 2011-05-10 22:12:33.000000000 -0400
41148 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
41149 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
41150 }
41151
41152 -static atomic_long_t nfs_attr_generation_counter;
41153 +static atomic_long_unchecked_t nfs_attr_generation_counter;
41154
41155 static unsigned long nfs_read_attr_generation_counter(void)
41156 {
41157 - return atomic_long_read(&nfs_attr_generation_counter);
41158 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
41159 }
41160
41161 unsigned long nfs_inc_attr_generation_counter(void)
41162 {
41163 - return atomic_long_inc_return(&nfs_attr_generation_counter);
41164 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
41165 }
41166
41167 void nfs_fattr_init(struct nfs_fattr *fattr)
41168 diff -urNp linux-2.6.32.42/fs/nfsd/lockd.c linux-2.6.32.42/fs/nfsd/lockd.c
41169 --- linux-2.6.32.42/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
41170 +++ linux-2.6.32.42/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
41171 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
41172 fput(filp);
41173 }
41174
41175 -static struct nlmsvc_binding nfsd_nlm_ops = {
41176 +static const struct nlmsvc_binding nfsd_nlm_ops = {
41177 .fopen = nlm_fopen, /* open file for locking */
41178 .fclose = nlm_fclose, /* close file */
41179 };
41180 diff -urNp linux-2.6.32.42/fs/nfsd/nfs4state.c linux-2.6.32.42/fs/nfsd/nfs4state.c
41181 --- linux-2.6.32.42/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
41182 +++ linux-2.6.32.42/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
41183 @@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
41184 unsigned int cmd;
41185 int err;
41186
41187 + pax_track_stack();
41188 +
41189 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
41190 (long long) lock->lk_offset,
41191 (long long) lock->lk_length);
41192 diff -urNp linux-2.6.32.42/fs/nfsd/nfs4xdr.c linux-2.6.32.42/fs/nfsd/nfs4xdr.c
41193 --- linux-2.6.32.42/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
41194 +++ linux-2.6.32.42/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
41195 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
41196 struct nfsd4_compoundres *resp = rqstp->rq_resp;
41197 u32 minorversion = resp->cstate.minorversion;
41198
41199 + pax_track_stack();
41200 +
41201 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
41202 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
41203 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
41204 diff -urNp linux-2.6.32.42/fs/nfsd/vfs.c linux-2.6.32.42/fs/nfsd/vfs.c
41205 --- linux-2.6.32.42/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
41206 +++ linux-2.6.32.42/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
41207 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
41208 } else {
41209 oldfs = get_fs();
41210 set_fs(KERNEL_DS);
41211 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
41212 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
41213 set_fs(oldfs);
41214 }
41215
41216 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
41217
41218 /* Write the data. */
41219 oldfs = get_fs(); set_fs(KERNEL_DS);
41220 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
41221 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
41222 set_fs(oldfs);
41223 if (host_err < 0)
41224 goto out_nfserr;
41225 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
41226 */
41227
41228 oldfs = get_fs(); set_fs(KERNEL_DS);
41229 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
41230 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
41231 set_fs(oldfs);
41232
41233 if (host_err < 0)
41234 diff -urNp linux-2.6.32.42/fs/nilfs2/ioctl.c linux-2.6.32.42/fs/nilfs2/ioctl.c
41235 --- linux-2.6.32.42/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
41236 +++ linux-2.6.32.42/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
41237 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
41238 unsigned int cmd, void __user *argp)
41239 {
41240 struct nilfs_argv argv[5];
41241 - const static size_t argsz[5] = {
41242 + static const size_t argsz[5] = {
41243 sizeof(struct nilfs_vdesc),
41244 sizeof(struct nilfs_period),
41245 sizeof(__u64),
41246 diff -urNp linux-2.6.32.42/fs/notify/dnotify/dnotify.c linux-2.6.32.42/fs/notify/dnotify/dnotify.c
41247 --- linux-2.6.32.42/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
41248 +++ linux-2.6.32.42/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
41249 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
41250 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
41251 }
41252
41253 -static struct fsnotify_ops dnotify_fsnotify_ops = {
41254 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
41255 .handle_event = dnotify_handle_event,
41256 .should_send_event = dnotify_should_send_event,
41257 .free_group_priv = NULL,
41258 diff -urNp linux-2.6.32.42/fs/notify/notification.c linux-2.6.32.42/fs/notify/notification.c
41259 --- linux-2.6.32.42/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
41260 +++ linux-2.6.32.42/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
41261 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
41262 * get set to 0 so it will never get 'freed'
41263 */
41264 static struct fsnotify_event q_overflow_event;
41265 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41266 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41267
41268 /**
41269 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
41270 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
41271 */
41272 u32 fsnotify_get_cookie(void)
41273 {
41274 - return atomic_inc_return(&fsnotify_sync_cookie);
41275 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
41276 }
41277 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
41278
41279 diff -urNp linux-2.6.32.42/fs/ntfs/dir.c linux-2.6.32.42/fs/ntfs/dir.c
41280 --- linux-2.6.32.42/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41281 +++ linux-2.6.32.42/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
41282 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
41283 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
41284 ~(s64)(ndir->itype.index.block_size - 1)));
41285 /* Bounds checks. */
41286 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41287 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41288 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
41289 "inode 0x%lx or driver bug.", vdir->i_ino);
41290 goto err_out;
41291 diff -urNp linux-2.6.32.42/fs/ntfs/file.c linux-2.6.32.42/fs/ntfs/file.c
41292 --- linux-2.6.32.42/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
41293 +++ linux-2.6.32.42/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
41294 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
41295 #endif /* NTFS_RW */
41296 };
41297
41298 -const struct file_operations ntfs_empty_file_ops = {};
41299 +const struct file_operations ntfs_empty_file_ops __read_only;
41300
41301 -const struct inode_operations ntfs_empty_inode_ops = {};
41302 +const struct inode_operations ntfs_empty_inode_ops __read_only;
41303 diff -urNp linux-2.6.32.42/fs/ocfs2/cluster/masklog.c linux-2.6.32.42/fs/ocfs2/cluster/masklog.c
41304 --- linux-2.6.32.42/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
41305 +++ linux-2.6.32.42/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
41306 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
41307 return mlog_mask_store(mlog_attr->mask, buf, count);
41308 }
41309
41310 -static struct sysfs_ops mlog_attr_ops = {
41311 +static const struct sysfs_ops mlog_attr_ops = {
41312 .show = mlog_show,
41313 .store = mlog_store,
41314 };
41315 diff -urNp linux-2.6.32.42/fs/ocfs2/localalloc.c linux-2.6.32.42/fs/ocfs2/localalloc.c
41316 --- linux-2.6.32.42/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
41317 +++ linux-2.6.32.42/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
41318 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
41319 goto bail;
41320 }
41321
41322 - atomic_inc(&osb->alloc_stats.moves);
41323 + atomic_inc_unchecked(&osb->alloc_stats.moves);
41324
41325 status = 0;
41326 bail:
41327 diff -urNp linux-2.6.32.42/fs/ocfs2/namei.c linux-2.6.32.42/fs/ocfs2/namei.c
41328 --- linux-2.6.32.42/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
41329 +++ linux-2.6.32.42/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
41330 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
41331 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
41332 struct ocfs2_dir_lookup_result target_insert = { NULL, };
41333
41334 + pax_track_stack();
41335 +
41336 /* At some point it might be nice to break this function up a
41337 * bit. */
41338
41339 diff -urNp linux-2.6.32.42/fs/ocfs2/ocfs2.h linux-2.6.32.42/fs/ocfs2/ocfs2.h
41340 --- linux-2.6.32.42/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
41341 +++ linux-2.6.32.42/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
41342 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
41343
41344 struct ocfs2_alloc_stats
41345 {
41346 - atomic_t moves;
41347 - atomic_t local_data;
41348 - atomic_t bitmap_data;
41349 - atomic_t bg_allocs;
41350 - atomic_t bg_extends;
41351 + atomic_unchecked_t moves;
41352 + atomic_unchecked_t local_data;
41353 + atomic_unchecked_t bitmap_data;
41354 + atomic_unchecked_t bg_allocs;
41355 + atomic_unchecked_t bg_extends;
41356 };
41357
41358 enum ocfs2_local_alloc_state
41359 diff -urNp linux-2.6.32.42/fs/ocfs2/suballoc.c linux-2.6.32.42/fs/ocfs2/suballoc.c
41360 --- linux-2.6.32.42/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
41361 +++ linux-2.6.32.42/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
41362 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
41363 mlog_errno(status);
41364 goto bail;
41365 }
41366 - atomic_inc(&osb->alloc_stats.bg_extends);
41367 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
41368
41369 /* You should never ask for this much metadata */
41370 BUG_ON(bits_wanted >
41371 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
41372 mlog_errno(status);
41373 goto bail;
41374 }
41375 - atomic_inc(&osb->alloc_stats.bg_allocs);
41376 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
41377
41378 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
41379 ac->ac_bits_given += (*num_bits);
41380 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
41381 mlog_errno(status);
41382 goto bail;
41383 }
41384 - atomic_inc(&osb->alloc_stats.bg_allocs);
41385 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
41386
41387 BUG_ON(num_bits != 1);
41388
41389 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
41390 cluster_start,
41391 num_clusters);
41392 if (!status)
41393 - atomic_inc(&osb->alloc_stats.local_data);
41394 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
41395 } else {
41396 if (min_clusters > (osb->bitmap_cpg - 1)) {
41397 /* The only paths asking for contiguousness
41398 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
41399 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
41400 bg_blkno,
41401 bg_bit_off);
41402 - atomic_inc(&osb->alloc_stats.bitmap_data);
41403 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
41404 }
41405 }
41406 if (status < 0) {
41407 diff -urNp linux-2.6.32.42/fs/ocfs2/super.c linux-2.6.32.42/fs/ocfs2/super.c
41408 --- linux-2.6.32.42/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
41409 +++ linux-2.6.32.42/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
41410 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
41411 "%10s => GlobalAllocs: %d LocalAllocs: %d "
41412 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
41413 "Stats",
41414 - atomic_read(&osb->alloc_stats.bitmap_data),
41415 - atomic_read(&osb->alloc_stats.local_data),
41416 - atomic_read(&osb->alloc_stats.bg_allocs),
41417 - atomic_read(&osb->alloc_stats.moves),
41418 - atomic_read(&osb->alloc_stats.bg_extends));
41419 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
41420 + atomic_read_unchecked(&osb->alloc_stats.local_data),
41421 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
41422 + atomic_read_unchecked(&osb->alloc_stats.moves),
41423 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
41424
41425 out += snprintf(buf + out, len - out,
41426 "%10s => State: %u Descriptor: %llu Size: %u bits "
41427 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
41428 spin_lock_init(&osb->osb_xattr_lock);
41429 ocfs2_init_inode_steal_slot(osb);
41430
41431 - atomic_set(&osb->alloc_stats.moves, 0);
41432 - atomic_set(&osb->alloc_stats.local_data, 0);
41433 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
41434 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
41435 - atomic_set(&osb->alloc_stats.bg_extends, 0);
41436 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
41437 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
41438 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
41439 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
41440 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
41441
41442 /* Copy the blockcheck stats from the superblock probe */
41443 osb->osb_ecc_stats = *stats;
41444 diff -urNp linux-2.6.32.42/fs/open.c linux-2.6.32.42/fs/open.c
41445 --- linux-2.6.32.42/fs/open.c 2011-03-27 14:31:47.000000000 -0400
41446 +++ linux-2.6.32.42/fs/open.c 2011-04-17 15:56:46.000000000 -0400
41447 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
41448 error = locks_verify_truncate(inode, NULL, length);
41449 if (!error)
41450 error = security_path_truncate(&path, length, 0);
41451 +
41452 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
41453 + error = -EACCES;
41454 +
41455 if (!error) {
41456 vfs_dq_init(inode);
41457 error = do_truncate(path.dentry, length, 0, NULL);
41458 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
41459 if (__mnt_is_readonly(path.mnt))
41460 res = -EROFS;
41461
41462 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
41463 + res = -EACCES;
41464 +
41465 out_path_release:
41466 path_put(&path);
41467 out:
41468 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
41469 if (error)
41470 goto dput_and_out;
41471
41472 + gr_log_chdir(path.dentry, path.mnt);
41473 +
41474 set_fs_pwd(current->fs, &path);
41475
41476 dput_and_out:
41477 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
41478 goto out_putf;
41479
41480 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
41481 +
41482 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
41483 + error = -EPERM;
41484 +
41485 + if (!error)
41486 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
41487 +
41488 if (!error)
41489 set_fs_pwd(current->fs, &file->f_path);
41490 out_putf:
41491 @@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
41492 if (!capable(CAP_SYS_CHROOT))
41493 goto dput_and_out;
41494
41495 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
41496 + goto dput_and_out;
41497 +
41498 + if (gr_handle_chroot_caps(&path)) {
41499 + error = -ENOMEM;
41500 + goto dput_and_out;
41501 + }
41502 +
41503 set_fs_root(current->fs, &path);
41504 +
41505 + gr_handle_chroot_chdir(&path);
41506 +
41507 error = 0;
41508 dput_and_out:
41509 path_put(&path);
41510 @@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
41511 err = mnt_want_write_file(file);
41512 if (err)
41513 goto out_putf;
41514 +
41515 mutex_lock(&inode->i_mutex);
41516 +
41517 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
41518 + err = -EACCES;
41519 + goto out_unlock;
41520 + }
41521 +
41522 if (mode == (mode_t) -1)
41523 mode = inode->i_mode;
41524 +
41525 + if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
41526 + err = -EPERM;
41527 + goto out_unlock;
41528 + }
41529 +
41530 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
41531 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
41532 err = notify_change(dentry, &newattrs);
41533 +
41534 +out_unlock:
41535 mutex_unlock(&inode->i_mutex);
41536 mnt_drop_write(file->f_path.mnt);
41537 out_putf:
41538 @@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
41539 error = mnt_want_write(path.mnt);
41540 if (error)
41541 goto dput_and_out;
41542 +
41543 mutex_lock(&inode->i_mutex);
41544 +
41545 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
41546 + error = -EACCES;
41547 + goto out_unlock;
41548 + }
41549 +
41550 if (mode == (mode_t) -1)
41551 mode = inode->i_mode;
41552 +
41553 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
41554 + error = -EACCES;
41555 + goto out_unlock;
41556 + }
41557 +
41558 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
41559 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
41560 error = notify_change(path.dentry, &newattrs);
41561 +
41562 +out_unlock:
41563 mutex_unlock(&inode->i_mutex);
41564 mnt_drop_write(path.mnt);
41565 dput_and_out:
41566 @@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
41567 return sys_fchmodat(AT_FDCWD, filename, mode);
41568 }
41569
41570 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
41571 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
41572 {
41573 struct inode *inode = dentry->d_inode;
41574 int error;
41575 struct iattr newattrs;
41576
41577 + if (!gr_acl_handle_chown(dentry, mnt))
41578 + return -EACCES;
41579 +
41580 newattrs.ia_valid = ATTR_CTIME;
41581 if (user != (uid_t) -1) {
41582 newattrs.ia_valid |= ATTR_UID;
41583 @@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
41584 error = mnt_want_write(path.mnt);
41585 if (error)
41586 goto out_release;
41587 - error = chown_common(path.dentry, user, group);
41588 + error = chown_common(path.dentry, user, group, path.mnt);
41589 mnt_drop_write(path.mnt);
41590 out_release:
41591 path_put(&path);
41592 @@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
41593 error = mnt_want_write(path.mnt);
41594 if (error)
41595 goto out_release;
41596 - error = chown_common(path.dentry, user, group);
41597 + error = chown_common(path.dentry, user, group, path.mnt);
41598 mnt_drop_write(path.mnt);
41599 out_release:
41600 path_put(&path);
41601 @@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
41602 error = mnt_want_write(path.mnt);
41603 if (error)
41604 goto out_release;
41605 - error = chown_common(path.dentry, user, group);
41606 + error = chown_common(path.dentry, user, group, path.mnt);
41607 mnt_drop_write(path.mnt);
41608 out_release:
41609 path_put(&path);
41610 @@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
41611 goto out_fput;
41612 dentry = file->f_path.dentry;
41613 audit_inode(NULL, dentry);
41614 - error = chown_common(dentry, user, group);
41615 + error = chown_common(dentry, user, group, file->f_path.mnt);
41616 mnt_drop_write(file->f_path.mnt);
41617 out_fput:
41618 fput(file);
41619 @@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
41620 if (!IS_ERR(tmp)) {
41621 fd = get_unused_fd_flags(flags);
41622 if (fd >= 0) {
41623 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
41624 + struct file *f;
41625 + /* don't allow to be set by userland */
41626 + flags &= ~FMODE_GREXEC;
41627 + f = do_filp_open(dfd, tmp, flags, mode, 0);
41628 if (IS_ERR(f)) {
41629 put_unused_fd(fd);
41630 fd = PTR_ERR(f);
41631 diff -urNp linux-2.6.32.42/fs/partitions/ldm.c linux-2.6.32.42/fs/partitions/ldm.c
41632 --- linux-2.6.32.42/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
41633 +++ linux-2.6.32.42/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
41634 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
41635 ldm_error ("A VBLK claims to have %d parts.", num);
41636 return false;
41637 }
41638 +
41639 if (rec >= num) {
41640 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
41641 return false;
41642 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
41643 goto found;
41644 }
41645
41646 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
41647 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
41648 if (!f) {
41649 ldm_crit ("Out of memory.");
41650 return false;
41651 diff -urNp linux-2.6.32.42/fs/partitions/mac.c linux-2.6.32.42/fs/partitions/mac.c
41652 --- linux-2.6.32.42/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
41653 +++ linux-2.6.32.42/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
41654 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
41655 return 0; /* not a MacOS disk */
41656 }
41657 blocks_in_map = be32_to_cpu(part->map_count);
41658 + printk(" [mac]");
41659 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
41660 put_dev_sector(sect);
41661 return 0;
41662 }
41663 - printk(" [mac]");
41664 for (slot = 1; slot <= blocks_in_map; ++slot) {
41665 int pos = slot * secsize;
41666 put_dev_sector(sect);
41667 diff -urNp linux-2.6.32.42/fs/pipe.c linux-2.6.32.42/fs/pipe.c
41668 --- linux-2.6.32.42/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
41669 +++ linux-2.6.32.42/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
41670 @@ -401,9 +401,9 @@ redo:
41671 }
41672 if (bufs) /* More to do? */
41673 continue;
41674 - if (!pipe->writers)
41675 + if (!atomic_read(&pipe->writers))
41676 break;
41677 - if (!pipe->waiting_writers) {
41678 + if (!atomic_read(&pipe->waiting_writers)) {
41679 /* syscall merging: Usually we must not sleep
41680 * if O_NONBLOCK is set, or if we got some data.
41681 * But if a writer sleeps in kernel space, then
41682 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
41683 mutex_lock(&inode->i_mutex);
41684 pipe = inode->i_pipe;
41685
41686 - if (!pipe->readers) {
41687 + if (!atomic_read(&pipe->readers)) {
41688 send_sig(SIGPIPE, current, 0);
41689 ret = -EPIPE;
41690 goto out;
41691 @@ -511,7 +511,7 @@ redo1:
41692 for (;;) {
41693 int bufs;
41694
41695 - if (!pipe->readers) {
41696 + if (!atomic_read(&pipe->readers)) {
41697 send_sig(SIGPIPE, current, 0);
41698 if (!ret)
41699 ret = -EPIPE;
41700 @@ -597,9 +597,9 @@ redo2:
41701 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41702 do_wakeup = 0;
41703 }
41704 - pipe->waiting_writers++;
41705 + atomic_inc(&pipe->waiting_writers);
41706 pipe_wait(pipe);
41707 - pipe->waiting_writers--;
41708 + atomic_dec(&pipe->waiting_writers);
41709 }
41710 out:
41711 mutex_unlock(&inode->i_mutex);
41712 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
41713 mask = 0;
41714 if (filp->f_mode & FMODE_READ) {
41715 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
41716 - if (!pipe->writers && filp->f_version != pipe->w_counter)
41717 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
41718 mask |= POLLHUP;
41719 }
41720
41721 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
41722 * Most Unices do not set POLLERR for FIFOs but on Linux they
41723 * behave exactly like pipes for poll().
41724 */
41725 - if (!pipe->readers)
41726 + if (!atomic_read(&pipe->readers))
41727 mask |= POLLERR;
41728 }
41729
41730 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
41731
41732 mutex_lock(&inode->i_mutex);
41733 pipe = inode->i_pipe;
41734 - pipe->readers -= decr;
41735 - pipe->writers -= decw;
41736 + atomic_sub(decr, &pipe->readers);
41737 + atomic_sub(decw, &pipe->writers);
41738
41739 - if (!pipe->readers && !pipe->writers) {
41740 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
41741 free_pipe_info(inode);
41742 } else {
41743 wake_up_interruptible_sync(&pipe->wait);
41744 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
41745
41746 if (inode->i_pipe) {
41747 ret = 0;
41748 - inode->i_pipe->readers++;
41749 + atomic_inc(&inode->i_pipe->readers);
41750 }
41751
41752 mutex_unlock(&inode->i_mutex);
41753 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
41754
41755 if (inode->i_pipe) {
41756 ret = 0;
41757 - inode->i_pipe->writers++;
41758 + atomic_inc(&inode->i_pipe->writers);
41759 }
41760
41761 mutex_unlock(&inode->i_mutex);
41762 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
41763 if (inode->i_pipe) {
41764 ret = 0;
41765 if (filp->f_mode & FMODE_READ)
41766 - inode->i_pipe->readers++;
41767 + atomic_inc(&inode->i_pipe->readers);
41768 if (filp->f_mode & FMODE_WRITE)
41769 - inode->i_pipe->writers++;
41770 + atomic_inc(&inode->i_pipe->writers);
41771 }
41772
41773 mutex_unlock(&inode->i_mutex);
41774 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
41775 inode->i_pipe = NULL;
41776 }
41777
41778 -static struct vfsmount *pipe_mnt __read_mostly;
41779 +struct vfsmount *pipe_mnt __read_mostly;
41780 static int pipefs_delete_dentry(struct dentry *dentry)
41781 {
41782 /*
41783 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
41784 goto fail_iput;
41785 inode->i_pipe = pipe;
41786
41787 - pipe->readers = pipe->writers = 1;
41788 + atomic_set(&pipe->readers, 1);
41789 + atomic_set(&pipe->writers, 1);
41790 inode->i_fop = &rdwr_pipefifo_fops;
41791
41792 /*
41793 diff -urNp linux-2.6.32.42/fs/proc/array.c linux-2.6.32.42/fs/proc/array.c
41794 --- linux-2.6.32.42/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
41795 +++ linux-2.6.32.42/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
41796 @@ -60,6 +60,7 @@
41797 #include <linux/tty.h>
41798 #include <linux/string.h>
41799 #include <linux/mman.h>
41800 +#include <linux/grsecurity.h>
41801 #include <linux/proc_fs.h>
41802 #include <linux/ioport.h>
41803 #include <linux/uaccess.h>
41804 @@ -321,6 +322,21 @@ static inline void task_context_switch_c
41805 p->nivcsw);
41806 }
41807
41808 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41809 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
41810 +{
41811 + if (p->mm)
41812 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
41813 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
41814 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
41815 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
41816 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
41817 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
41818 + else
41819 + seq_printf(m, "PaX:\t-----\n");
41820 +}
41821 +#endif
41822 +
41823 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
41824 struct pid *pid, struct task_struct *task)
41825 {
41826 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
41827 task_cap(m, task);
41828 cpuset_task_status_allowed(m, task);
41829 task_context_switch_counts(m, task);
41830 +
41831 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41832 + task_pax(m, task);
41833 +#endif
41834 +
41835 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
41836 + task_grsec_rbac(m, task);
41837 +#endif
41838 +
41839 return 0;
41840 }
41841
41842 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41843 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
41844 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
41845 + _mm->pax_flags & MF_PAX_SEGMEXEC))
41846 +#endif
41847 +
41848 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
41849 struct pid *pid, struct task_struct *task, int whole)
41850 {
41851 @@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
41852 cputime_t cutime, cstime, utime, stime;
41853 cputime_t cgtime, gtime;
41854 unsigned long rsslim = 0;
41855 - char tcomm[sizeof(task->comm)];
41856 + char tcomm[sizeof(task->comm)] = { 0 };
41857 unsigned long flags;
41858
41859 + pax_track_stack();
41860 +
41861 state = *get_task_state(task);
41862 vsize = eip = esp = 0;
41863 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
41864 @@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
41865 gtime = task_gtime(task);
41866 }
41867
41868 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41869 + if (PAX_RAND_FLAGS(mm)) {
41870 + eip = 0;
41871 + esp = 0;
41872 + wchan = 0;
41873 + }
41874 +#endif
41875 +#ifdef CONFIG_GRKERNSEC_HIDESYM
41876 + wchan = 0;
41877 + eip =0;
41878 + esp =0;
41879 +#endif
41880 +
41881 /* scale priority and nice values from timeslices to -20..20 */
41882 /* to make it look like a "normal" Unix priority/nice value */
41883 priority = task_prio(task);
41884 @@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
41885 vsize,
41886 mm ? get_mm_rss(mm) : 0,
41887 rsslim,
41888 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41889 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
41890 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
41891 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
41892 +#else
41893 mm ? (permitted ? mm->start_code : 1) : 0,
41894 mm ? (permitted ? mm->end_code : 1) : 0,
41895 (permitted && mm) ? mm->start_stack : 0,
41896 +#endif
41897 esp,
41898 eip,
41899 /* The signal information here is obsolete.
41900 @@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
41901
41902 return 0;
41903 }
41904 +
41905 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
41906 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
41907 +{
41908 + u32 curr_ip = 0;
41909 + unsigned long flags;
41910 +
41911 + if (lock_task_sighand(task, &flags)) {
41912 + curr_ip = task->signal->curr_ip;
41913 + unlock_task_sighand(task, &flags);
41914 + }
41915 +
41916 + return sprintf(buffer, "%pI4\n", &curr_ip);
41917 +}
41918 +#endif
41919 diff -urNp linux-2.6.32.42/fs/proc/base.c linux-2.6.32.42/fs/proc/base.c
41920 --- linux-2.6.32.42/fs/proc/base.c 2011-04-22 19:16:29.000000000 -0400
41921 +++ linux-2.6.32.42/fs/proc/base.c 2011-06-04 21:20:50.000000000 -0400
41922 @@ -102,6 +102,22 @@ struct pid_entry {
41923 union proc_op op;
41924 };
41925
41926 +struct getdents_callback {
41927 + struct linux_dirent __user * current_dir;
41928 + struct linux_dirent __user * previous;
41929 + struct file * file;
41930 + int count;
41931 + int error;
41932 +};
41933 +
41934 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
41935 + loff_t offset, u64 ino, unsigned int d_type)
41936 +{
41937 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
41938 + buf->error = -EINVAL;
41939 + return 0;
41940 +}
41941 +
41942 #define NOD(NAME, MODE, IOP, FOP, OP) { \
41943 .name = (NAME), \
41944 .len = sizeof(NAME) - 1, \
41945 @@ -213,6 +229,9 @@ static int check_mem_permission(struct t
41946 if (task == current)
41947 return 0;
41948
41949 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
41950 + return -EPERM;
41951 +
41952 /*
41953 * If current is actively ptrace'ing, and would also be
41954 * permitted to freshly attach with ptrace now, permit it.
41955 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
41956 if (!mm->arg_end)
41957 goto out_mm; /* Shh! No looking before we're done */
41958
41959 + if (gr_acl_handle_procpidmem(task))
41960 + goto out_mm;
41961 +
41962 len = mm->arg_end - mm->arg_start;
41963
41964 if (len > PAGE_SIZE)
41965 @@ -287,12 +309,28 @@ out:
41966 return res;
41967 }
41968
41969 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41970 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
41971 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
41972 + _mm->pax_flags & MF_PAX_SEGMEXEC))
41973 +#endif
41974 +
41975 static int proc_pid_auxv(struct task_struct *task, char *buffer)
41976 {
41977 int res = 0;
41978 struct mm_struct *mm = get_task_mm(task);
41979 if (mm) {
41980 unsigned int nwords = 0;
41981 +
41982 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41983 + /* allow if we're currently ptracing this task */
41984 + if (PAX_RAND_FLAGS(mm) &&
41985 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
41986 + mmput(mm);
41987 + return res;
41988 + }
41989 +#endif
41990 +
41991 do {
41992 nwords += 2;
41993 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
41994 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
41995 }
41996
41997
41998 -#ifdef CONFIG_KALLSYMS
41999 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42000 /*
42001 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
42002 * Returns the resolved symbol. If that fails, simply return the address.
42003 @@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
42004 }
42005 #endif /* CONFIG_KALLSYMS */
42006
42007 -#ifdef CONFIG_STACKTRACE
42008 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42009
42010 #define MAX_STACK_TRACE_DEPTH 64
42011
42012 @@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
42013 return count;
42014 }
42015
42016 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42017 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42018 static int proc_pid_syscall(struct task_struct *task, char *buffer)
42019 {
42020 long nr;
42021 @@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
42022 /************************************************************************/
42023
42024 /* permission checks */
42025 -static int proc_fd_access_allowed(struct inode *inode)
42026 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
42027 {
42028 struct task_struct *task;
42029 int allowed = 0;
42030 @@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
42031 */
42032 task = get_proc_task(inode);
42033 if (task) {
42034 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42035 + if (log)
42036 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
42037 + else
42038 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42039 put_task_struct(task);
42040 }
42041 return allowed;
42042 @@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
42043 if (!task)
42044 goto out_no_task;
42045
42046 + if (gr_acl_handle_procpidmem(task))
42047 + goto out;
42048 +
42049 if (!ptrace_may_access(task, PTRACE_MODE_READ))
42050 goto out;
42051
42052 @@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
42053 path_put(&nd->path);
42054
42055 /* Are we allowed to snoop on the tasks file descriptors? */
42056 - if (!proc_fd_access_allowed(inode))
42057 + if (!proc_fd_access_allowed(inode,0))
42058 goto out;
42059
42060 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
42061 @@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
42062 struct path path;
42063
42064 /* Are we allowed to snoop on the tasks file descriptors? */
42065 - if (!proc_fd_access_allowed(inode))
42066 - goto out;
42067 + /* logging this is needed for learning on chromium to work properly,
42068 + but we don't want to flood the logs from 'ps' which does a readlink
42069 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
42070 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
42071 + */
42072 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
42073 + if (!proc_fd_access_allowed(inode,0))
42074 + goto out;
42075 + } else {
42076 + if (!proc_fd_access_allowed(inode,1))
42077 + goto out;
42078 + }
42079
42080 error = PROC_I(inode)->op.proc_get_link(inode, &path);
42081 if (error)
42082 @@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
42083 rcu_read_lock();
42084 cred = __task_cred(task);
42085 inode->i_uid = cred->euid;
42086 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42087 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42088 +#else
42089 inode->i_gid = cred->egid;
42090 +#endif
42091 rcu_read_unlock();
42092 }
42093 security_task_to_inode(task, inode);
42094 @@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
42095 struct inode *inode = dentry->d_inode;
42096 struct task_struct *task;
42097 const struct cred *cred;
42098 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42099 + const struct cred *tmpcred = current_cred();
42100 +#endif
42101
42102 generic_fillattr(inode, stat);
42103
42104 @@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
42105 stat->uid = 0;
42106 stat->gid = 0;
42107 task = pid_task(proc_pid(inode), PIDTYPE_PID);
42108 +
42109 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
42110 + rcu_read_unlock();
42111 + return -ENOENT;
42112 + }
42113 +
42114 if (task) {
42115 + cred = __task_cred(task);
42116 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42117 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
42118 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42119 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42120 +#endif
42121 + ) {
42122 +#endif
42123 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42124 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42125 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42126 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42127 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42128 +#endif
42129 task_dumpable(task)) {
42130 - cred = __task_cred(task);
42131 stat->uid = cred->euid;
42132 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42133 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
42134 +#else
42135 stat->gid = cred->egid;
42136 +#endif
42137 }
42138 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42139 + } else {
42140 + rcu_read_unlock();
42141 + return -ENOENT;
42142 + }
42143 +#endif
42144 }
42145 rcu_read_unlock();
42146 return 0;
42147 @@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
42148
42149 if (task) {
42150 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42151 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42152 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42153 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42154 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42155 +#endif
42156 task_dumpable(task)) {
42157 rcu_read_lock();
42158 cred = __task_cred(task);
42159 inode->i_uid = cred->euid;
42160 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42161 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42162 +#else
42163 inode->i_gid = cred->egid;
42164 +#endif
42165 rcu_read_unlock();
42166 } else {
42167 inode->i_uid = 0;
42168 @@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
42169 int fd = proc_fd(inode);
42170
42171 if (task) {
42172 - files = get_files_struct(task);
42173 + if (!gr_acl_handle_procpidmem(task))
42174 + files = get_files_struct(task);
42175 put_task_struct(task);
42176 }
42177 if (files) {
42178 @@ -1895,12 +1994,22 @@ static const struct file_operations proc
42179 static int proc_fd_permission(struct inode *inode, int mask)
42180 {
42181 int rv;
42182 + struct task_struct *task;
42183
42184 rv = generic_permission(inode, mask, NULL);
42185 - if (rv == 0)
42186 - return 0;
42187 +
42188 if (task_pid(current) == proc_pid(inode))
42189 rv = 0;
42190 +
42191 + task = get_proc_task(inode);
42192 + if (task == NULL)
42193 + return rv;
42194 +
42195 + if (gr_acl_handle_procpidmem(task))
42196 + rv = -EACCES;
42197 +
42198 + put_task_struct(task);
42199 +
42200 return rv;
42201 }
42202
42203 @@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
42204 if (!task)
42205 goto out_no_task;
42206
42207 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42208 + goto out;
42209 +
42210 /*
42211 * Yes, it does not scale. And it should not. Don't add
42212 * new entries into /proc/<tgid>/ without very good reasons.
42213 @@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
42214 if (!task)
42215 goto out_no_task;
42216
42217 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42218 + goto out;
42219 +
42220 ret = 0;
42221 i = filp->f_pos;
42222 switch (i) {
42223 @@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
42224 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
42225 void *cookie)
42226 {
42227 - char *s = nd_get_link(nd);
42228 + const char *s = nd_get_link(nd);
42229 if (!IS_ERR(s))
42230 __putname(s);
42231 }
42232 @@ -2519,7 +2634,7 @@ static const struct pid_entry tgid_base_
42233 #ifdef CONFIG_SCHED_DEBUG
42234 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42235 #endif
42236 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42237 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42238 INF("syscall", S_IRUSR, proc_pid_syscall),
42239 #endif
42240 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42241 @@ -2544,10 +2659,10 @@ static const struct pid_entry tgid_base_
42242 #ifdef CONFIG_SECURITY
42243 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42244 #endif
42245 -#ifdef CONFIG_KALLSYMS
42246 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42247 INF("wchan", S_IRUGO, proc_pid_wchan),
42248 #endif
42249 -#ifdef CONFIG_STACKTRACE
42250 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42251 ONE("stack", S_IRUSR, proc_pid_stack),
42252 #endif
42253 #ifdef CONFIG_SCHEDSTATS
42254 @@ -2577,6 +2692,9 @@ static const struct pid_entry tgid_base_
42255 #ifdef CONFIG_TASK_IO_ACCOUNTING
42256 INF("io", S_IRUGO, proc_tgid_io_accounting),
42257 #endif
42258 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42259 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
42260 +#endif
42261 };
42262
42263 static int proc_tgid_base_readdir(struct file * filp,
42264 @@ -2701,7 +2819,14 @@ static struct dentry *proc_pid_instantia
42265 if (!inode)
42266 goto out;
42267
42268 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42269 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
42270 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42271 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42272 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
42273 +#else
42274 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
42275 +#endif
42276 inode->i_op = &proc_tgid_base_inode_operations;
42277 inode->i_fop = &proc_tgid_base_operations;
42278 inode->i_flags|=S_IMMUTABLE;
42279 @@ -2743,7 +2868,11 @@ struct dentry *proc_pid_lookup(struct in
42280 if (!task)
42281 goto out;
42282
42283 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42284 + goto out_put_task;
42285 +
42286 result = proc_pid_instantiate(dir, dentry, task, NULL);
42287 +out_put_task:
42288 put_task_struct(task);
42289 out:
42290 return result;
42291 @@ -2808,6 +2937,11 @@ int proc_pid_readdir(struct file * filp,
42292 {
42293 unsigned int nr;
42294 struct task_struct *reaper;
42295 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42296 + const struct cred *tmpcred = current_cred();
42297 + const struct cred *itercred;
42298 +#endif
42299 + filldir_t __filldir = filldir;
42300 struct tgid_iter iter;
42301 struct pid_namespace *ns;
42302
42303 @@ -2831,8 +2965,27 @@ int proc_pid_readdir(struct file * filp,
42304 for (iter = next_tgid(ns, iter);
42305 iter.task;
42306 iter.tgid += 1, iter = next_tgid(ns, iter)) {
42307 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42308 + rcu_read_lock();
42309 + itercred = __task_cred(iter.task);
42310 +#endif
42311 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
42312 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42313 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
42314 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42315 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42316 +#endif
42317 + )
42318 +#endif
42319 + )
42320 + __filldir = &gr_fake_filldir;
42321 + else
42322 + __filldir = filldir;
42323 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42324 + rcu_read_unlock();
42325 +#endif
42326 filp->f_pos = iter.tgid + TGID_OFFSET;
42327 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
42328 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
42329 put_task_struct(iter.task);
42330 goto out;
42331 }
42332 @@ -2858,7 +3011,7 @@ static const struct pid_entry tid_base_s
42333 #ifdef CONFIG_SCHED_DEBUG
42334 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42335 #endif
42336 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42337 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42338 INF("syscall", S_IRUSR, proc_pid_syscall),
42339 #endif
42340 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42341 @@ -2882,10 +3035,10 @@ static const struct pid_entry tid_base_s
42342 #ifdef CONFIG_SECURITY
42343 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42344 #endif
42345 -#ifdef CONFIG_KALLSYMS
42346 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42347 INF("wchan", S_IRUGO, proc_pid_wchan),
42348 #endif
42349 -#ifdef CONFIG_STACKTRACE
42350 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42351 ONE("stack", S_IRUSR, proc_pid_stack),
42352 #endif
42353 #ifdef CONFIG_SCHEDSTATS
42354 diff -urNp linux-2.6.32.42/fs/proc/cmdline.c linux-2.6.32.42/fs/proc/cmdline.c
42355 --- linux-2.6.32.42/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
42356 +++ linux-2.6.32.42/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
42357 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
42358
42359 static int __init proc_cmdline_init(void)
42360 {
42361 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42362 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
42363 +#else
42364 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
42365 +#endif
42366 return 0;
42367 }
42368 module_init(proc_cmdline_init);
42369 diff -urNp linux-2.6.32.42/fs/proc/devices.c linux-2.6.32.42/fs/proc/devices.c
42370 --- linux-2.6.32.42/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
42371 +++ linux-2.6.32.42/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
42372 @@ -64,7 +64,11 @@ static const struct file_operations proc
42373
42374 static int __init proc_devices_init(void)
42375 {
42376 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42377 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
42378 +#else
42379 proc_create("devices", 0, NULL, &proc_devinfo_operations);
42380 +#endif
42381 return 0;
42382 }
42383 module_init(proc_devices_init);
42384 diff -urNp linux-2.6.32.42/fs/proc/inode.c linux-2.6.32.42/fs/proc/inode.c
42385 --- linux-2.6.32.42/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
42386 +++ linux-2.6.32.42/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
42387 @@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
42388 if (de->mode) {
42389 inode->i_mode = de->mode;
42390 inode->i_uid = de->uid;
42391 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42392 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42393 +#else
42394 inode->i_gid = de->gid;
42395 +#endif
42396 }
42397 if (de->size)
42398 inode->i_size = de->size;
42399 diff -urNp linux-2.6.32.42/fs/proc/internal.h linux-2.6.32.42/fs/proc/internal.h
42400 --- linux-2.6.32.42/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
42401 +++ linux-2.6.32.42/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
42402 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
42403 struct pid *pid, struct task_struct *task);
42404 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
42405 struct pid *pid, struct task_struct *task);
42406 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42407 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
42408 +#endif
42409 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
42410
42411 extern const struct file_operations proc_maps_operations;
42412 diff -urNp linux-2.6.32.42/fs/proc/Kconfig linux-2.6.32.42/fs/proc/Kconfig
42413 --- linux-2.6.32.42/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
42414 +++ linux-2.6.32.42/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
42415 @@ -30,12 +30,12 @@ config PROC_FS
42416
42417 config PROC_KCORE
42418 bool "/proc/kcore support" if !ARM
42419 - depends on PROC_FS && MMU
42420 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
42421
42422 config PROC_VMCORE
42423 bool "/proc/vmcore support (EXPERIMENTAL)"
42424 - depends on PROC_FS && CRASH_DUMP
42425 - default y
42426 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
42427 + default n
42428 help
42429 Exports the dump image of crashed kernel in ELF format.
42430
42431 @@ -59,8 +59,8 @@ config PROC_SYSCTL
42432 limited in memory.
42433
42434 config PROC_PAGE_MONITOR
42435 - default y
42436 - depends on PROC_FS && MMU
42437 + default n
42438 + depends on PROC_FS && MMU && !GRKERNSEC
42439 bool "Enable /proc page monitoring" if EMBEDDED
42440 help
42441 Various /proc files exist to monitor process memory utilization:
42442 diff -urNp linux-2.6.32.42/fs/proc/kcore.c linux-2.6.32.42/fs/proc/kcore.c
42443 --- linux-2.6.32.42/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
42444 +++ linux-2.6.32.42/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
42445 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
42446 off_t offset = 0;
42447 struct kcore_list *m;
42448
42449 + pax_track_stack();
42450 +
42451 /* setup ELF header */
42452 elf = (struct elfhdr *) bufp;
42453 bufp += sizeof(struct elfhdr);
42454 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
42455 * the addresses in the elf_phdr on our list.
42456 */
42457 start = kc_offset_to_vaddr(*fpos - elf_buflen);
42458 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
42459 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
42460 + if (tsz > buflen)
42461 tsz = buflen;
42462 -
42463 +
42464 while (buflen) {
42465 struct kcore_list *m;
42466
42467 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
42468 kfree(elf_buf);
42469 } else {
42470 if (kern_addr_valid(start)) {
42471 - unsigned long n;
42472 + char *elf_buf;
42473 + mm_segment_t oldfs;
42474
42475 - n = copy_to_user(buffer, (char *)start, tsz);
42476 - /*
42477 - * We cannot distingush between fault on source
42478 - * and fault on destination. When this happens
42479 - * we clear too and hope it will trigger the
42480 - * EFAULT again.
42481 - */
42482 - if (n) {
42483 - if (clear_user(buffer + tsz - n,
42484 - n))
42485 + elf_buf = kmalloc(tsz, GFP_KERNEL);
42486 + if (!elf_buf)
42487 + return -ENOMEM;
42488 + oldfs = get_fs();
42489 + set_fs(KERNEL_DS);
42490 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
42491 + set_fs(oldfs);
42492 + if (copy_to_user(buffer, elf_buf, tsz)) {
42493 + kfree(elf_buf);
42494 return -EFAULT;
42495 + }
42496 }
42497 + set_fs(oldfs);
42498 + kfree(elf_buf);
42499 } else {
42500 if (clear_user(buffer, tsz))
42501 return -EFAULT;
42502 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
42503
42504 static int open_kcore(struct inode *inode, struct file *filp)
42505 {
42506 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
42507 + return -EPERM;
42508 +#endif
42509 if (!capable(CAP_SYS_RAWIO))
42510 return -EPERM;
42511 if (kcore_need_update)
42512 diff -urNp linux-2.6.32.42/fs/proc/meminfo.c linux-2.6.32.42/fs/proc/meminfo.c
42513 --- linux-2.6.32.42/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
42514 +++ linux-2.6.32.42/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
42515 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
42516 unsigned long pages[NR_LRU_LISTS];
42517 int lru;
42518
42519 + pax_track_stack();
42520 +
42521 /*
42522 * display in kilobytes.
42523 */
42524 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
42525 vmi.used >> 10,
42526 vmi.largest_chunk >> 10
42527 #ifdef CONFIG_MEMORY_FAILURE
42528 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
42529 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
42530 #endif
42531 );
42532
42533 diff -urNp linux-2.6.32.42/fs/proc/nommu.c linux-2.6.32.42/fs/proc/nommu.c
42534 --- linux-2.6.32.42/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
42535 +++ linux-2.6.32.42/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
42536 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
42537 if (len < 1)
42538 len = 1;
42539 seq_printf(m, "%*c", len, ' ');
42540 - seq_path(m, &file->f_path, "");
42541 + seq_path(m, &file->f_path, "\n\\");
42542 }
42543
42544 seq_putc(m, '\n');
42545 diff -urNp linux-2.6.32.42/fs/proc/proc_net.c linux-2.6.32.42/fs/proc/proc_net.c
42546 --- linux-2.6.32.42/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
42547 +++ linux-2.6.32.42/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
42548 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
42549 struct task_struct *task;
42550 struct nsproxy *ns;
42551 struct net *net = NULL;
42552 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42553 + const struct cred *cred = current_cred();
42554 +#endif
42555 +
42556 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42557 + if (cred->fsuid)
42558 + return net;
42559 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42560 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
42561 + return net;
42562 +#endif
42563
42564 rcu_read_lock();
42565 task = pid_task(proc_pid(dir), PIDTYPE_PID);
42566 diff -urNp linux-2.6.32.42/fs/proc/proc_sysctl.c linux-2.6.32.42/fs/proc/proc_sysctl.c
42567 --- linux-2.6.32.42/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
42568 +++ linux-2.6.32.42/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
42569 @@ -7,6 +7,8 @@
42570 #include <linux/security.h>
42571 #include "internal.h"
42572
42573 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
42574 +
42575 static const struct dentry_operations proc_sys_dentry_operations;
42576 static const struct file_operations proc_sys_file_operations;
42577 static const struct inode_operations proc_sys_inode_operations;
42578 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
42579 if (!p)
42580 goto out;
42581
42582 + if (gr_handle_sysctl(p, MAY_EXEC))
42583 + goto out;
42584 +
42585 err = ERR_PTR(-ENOMEM);
42586 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
42587 if (h)
42588 @@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
42589 if (*pos < file->f_pos)
42590 continue;
42591
42592 + if (gr_handle_sysctl(table, 0))
42593 + continue;
42594 +
42595 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
42596 if (res)
42597 return res;
42598 @@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
42599 if (IS_ERR(head))
42600 return PTR_ERR(head);
42601
42602 + if (table && gr_handle_sysctl(table, MAY_EXEC))
42603 + return -ENOENT;
42604 +
42605 generic_fillattr(inode, stat);
42606 if (table)
42607 stat->mode = (stat->mode & S_IFMT) | table->mode;
42608 diff -urNp linux-2.6.32.42/fs/proc/root.c linux-2.6.32.42/fs/proc/root.c
42609 --- linux-2.6.32.42/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
42610 +++ linux-2.6.32.42/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
42611 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
42612 #ifdef CONFIG_PROC_DEVICETREE
42613 proc_device_tree_init();
42614 #endif
42615 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42616 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42617 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
42618 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42619 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
42620 +#endif
42621 +#else
42622 proc_mkdir("bus", NULL);
42623 +#endif
42624 proc_sys_init();
42625 }
42626
42627 diff -urNp linux-2.6.32.42/fs/proc/task_mmu.c linux-2.6.32.42/fs/proc/task_mmu.c
42628 --- linux-2.6.32.42/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
42629 +++ linux-2.6.32.42/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
42630 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
42631 "VmStk:\t%8lu kB\n"
42632 "VmExe:\t%8lu kB\n"
42633 "VmLib:\t%8lu kB\n"
42634 - "VmPTE:\t%8lu kB\n",
42635 - hiwater_vm << (PAGE_SHIFT-10),
42636 + "VmPTE:\t%8lu kB\n"
42637 +
42638 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42639 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
42640 +#endif
42641 +
42642 + ,hiwater_vm << (PAGE_SHIFT-10),
42643 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
42644 mm->locked_vm << (PAGE_SHIFT-10),
42645 hiwater_rss << (PAGE_SHIFT-10),
42646 total_rss << (PAGE_SHIFT-10),
42647 data << (PAGE_SHIFT-10),
42648 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
42649 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
42650 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
42651 +
42652 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42653 + , mm->context.user_cs_base, mm->context.user_cs_limit
42654 +#endif
42655 +
42656 + );
42657 }
42658
42659 unsigned long task_vsize(struct mm_struct *mm)
42660 @@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
42661 struct proc_maps_private *priv = m->private;
42662 struct vm_area_struct *vma = v;
42663
42664 - vma_stop(priv, vma);
42665 + if (!IS_ERR(vma))
42666 + vma_stop(priv, vma);
42667 if (priv->task)
42668 put_task_struct(priv->task);
42669 }
42670 @@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
42671 return ret;
42672 }
42673
42674 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42675 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42676 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
42677 + _mm->pax_flags & MF_PAX_SEGMEXEC))
42678 +#endif
42679 +
42680 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
42681 {
42682 struct mm_struct *mm = vma->vm_mm;
42683 @@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
42684 int flags = vma->vm_flags;
42685 unsigned long ino = 0;
42686 unsigned long long pgoff = 0;
42687 - unsigned long start;
42688 dev_t dev = 0;
42689 int len;
42690
42691 @@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
42692 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
42693 }
42694
42695 - /* We don't show the stack guard page in /proc/maps */
42696 - start = vma->vm_start;
42697 - if (vma->vm_flags & VM_GROWSDOWN)
42698 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
42699 - start += PAGE_SIZE;
42700 -
42701 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
42702 - start,
42703 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42704 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
42705 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
42706 +#else
42707 + vma->vm_start,
42708 vma->vm_end,
42709 +#endif
42710 flags & VM_READ ? 'r' : '-',
42711 flags & VM_WRITE ? 'w' : '-',
42712 flags & VM_EXEC ? 'x' : '-',
42713 flags & VM_MAYSHARE ? 's' : 'p',
42714 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42715 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
42716 +#else
42717 pgoff,
42718 +#endif
42719 MAJOR(dev), MINOR(dev), ino, &len);
42720
42721 /*
42722 @@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
42723 */
42724 if (file) {
42725 pad_len_spaces(m, len);
42726 - seq_path(m, &file->f_path, "\n");
42727 + seq_path(m, &file->f_path, "\n\\");
42728 } else {
42729 const char *name = arch_vma_name(vma);
42730 if (!name) {
42731 @@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
42732 if (vma->vm_start <= mm->brk &&
42733 vma->vm_end >= mm->start_brk) {
42734 name = "[heap]";
42735 - } else if (vma->vm_start <= mm->start_stack &&
42736 - vma->vm_end >= mm->start_stack) {
42737 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
42738 + (vma->vm_start <= mm->start_stack &&
42739 + vma->vm_end >= mm->start_stack)) {
42740 name = "[stack]";
42741 }
42742 } else {
42743 @@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
42744 };
42745
42746 memset(&mss, 0, sizeof mss);
42747 - mss.vma = vma;
42748 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
42749 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
42750 +
42751 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42752 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
42753 +#endif
42754 + mss.vma = vma;
42755 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
42756 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
42757 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42758 + }
42759 +#endif
42760
42761 show_map_vma(m, vma);
42762
42763 @@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
42764 "Swap: %8lu kB\n"
42765 "KernelPageSize: %8lu kB\n"
42766 "MMUPageSize: %8lu kB\n",
42767 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42768 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
42769 +#else
42770 (vma->vm_end - vma->vm_start) >> 10,
42771 +#endif
42772 mss.resident >> 10,
42773 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
42774 mss.shared_clean >> 10,
42775 diff -urNp linux-2.6.32.42/fs/proc/task_nommu.c linux-2.6.32.42/fs/proc/task_nommu.c
42776 --- linux-2.6.32.42/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
42777 +++ linux-2.6.32.42/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
42778 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
42779 else
42780 bytes += kobjsize(mm);
42781
42782 - if (current->fs && current->fs->users > 1)
42783 + if (current->fs && atomic_read(&current->fs->users) > 1)
42784 sbytes += kobjsize(current->fs);
42785 else
42786 bytes += kobjsize(current->fs);
42787 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
42788 if (len < 1)
42789 len = 1;
42790 seq_printf(m, "%*c", len, ' ');
42791 - seq_path(m, &file->f_path, "");
42792 + seq_path(m, &file->f_path, "\n\\");
42793 }
42794
42795 seq_putc(m, '\n');
42796 diff -urNp linux-2.6.32.42/fs/readdir.c linux-2.6.32.42/fs/readdir.c
42797 --- linux-2.6.32.42/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
42798 +++ linux-2.6.32.42/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
42799 @@ -16,6 +16,7 @@
42800 #include <linux/security.h>
42801 #include <linux/syscalls.h>
42802 #include <linux/unistd.h>
42803 +#include <linux/namei.h>
42804
42805 #include <asm/uaccess.h>
42806
42807 @@ -67,6 +68,7 @@ struct old_linux_dirent {
42808
42809 struct readdir_callback {
42810 struct old_linux_dirent __user * dirent;
42811 + struct file * file;
42812 int result;
42813 };
42814
42815 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
42816 buf->result = -EOVERFLOW;
42817 return -EOVERFLOW;
42818 }
42819 +
42820 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42821 + return 0;
42822 +
42823 buf->result++;
42824 dirent = buf->dirent;
42825 if (!access_ok(VERIFY_WRITE, dirent,
42826 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
42827
42828 buf.result = 0;
42829 buf.dirent = dirent;
42830 + buf.file = file;
42831
42832 error = vfs_readdir(file, fillonedir, &buf);
42833 if (buf.result)
42834 @@ -142,6 +149,7 @@ struct linux_dirent {
42835 struct getdents_callback {
42836 struct linux_dirent __user * current_dir;
42837 struct linux_dirent __user * previous;
42838 + struct file * file;
42839 int count;
42840 int error;
42841 };
42842 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
42843 buf->error = -EOVERFLOW;
42844 return -EOVERFLOW;
42845 }
42846 +
42847 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42848 + return 0;
42849 +
42850 dirent = buf->previous;
42851 if (dirent) {
42852 if (__put_user(offset, &dirent->d_off))
42853 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
42854 buf.previous = NULL;
42855 buf.count = count;
42856 buf.error = 0;
42857 + buf.file = file;
42858
42859 error = vfs_readdir(file, filldir, &buf);
42860 if (error >= 0)
42861 @@ -228,6 +241,7 @@ out:
42862 struct getdents_callback64 {
42863 struct linux_dirent64 __user * current_dir;
42864 struct linux_dirent64 __user * previous;
42865 + struct file *file;
42866 int count;
42867 int error;
42868 };
42869 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
42870 buf->error = -EINVAL; /* only used if we fail.. */
42871 if (reclen > buf->count)
42872 return -EINVAL;
42873 +
42874 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42875 + return 0;
42876 +
42877 dirent = buf->previous;
42878 if (dirent) {
42879 if (__put_user(offset, &dirent->d_off))
42880 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
42881
42882 buf.current_dir = dirent;
42883 buf.previous = NULL;
42884 + buf.file = file;
42885 buf.count = count;
42886 buf.error = 0;
42887
42888 diff -urNp linux-2.6.32.42/fs/reiserfs/dir.c linux-2.6.32.42/fs/reiserfs/dir.c
42889 --- linux-2.6.32.42/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
42890 +++ linux-2.6.32.42/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
42891 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
42892 struct reiserfs_dir_entry de;
42893 int ret = 0;
42894
42895 + pax_track_stack();
42896 +
42897 reiserfs_write_lock(inode->i_sb);
42898
42899 reiserfs_check_lock_depth(inode->i_sb, "readdir");
42900 diff -urNp linux-2.6.32.42/fs/reiserfs/do_balan.c linux-2.6.32.42/fs/reiserfs/do_balan.c
42901 --- linux-2.6.32.42/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
42902 +++ linux-2.6.32.42/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
42903 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
42904 return;
42905 }
42906
42907 - atomic_inc(&(fs_generation(tb->tb_sb)));
42908 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
42909 do_balance_starts(tb);
42910
42911 /* balance leaf returns 0 except if combining L R and S into
42912 diff -urNp linux-2.6.32.42/fs/reiserfs/item_ops.c linux-2.6.32.42/fs/reiserfs/item_ops.c
42913 --- linux-2.6.32.42/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
42914 +++ linux-2.6.32.42/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
42915 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
42916 vi->vi_index, vi->vi_type, vi->vi_ih);
42917 }
42918
42919 -static struct item_operations stat_data_ops = {
42920 +static const struct item_operations stat_data_ops = {
42921 .bytes_number = sd_bytes_number,
42922 .decrement_key = sd_decrement_key,
42923 .is_left_mergeable = sd_is_left_mergeable,
42924 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
42925 vi->vi_index, vi->vi_type, vi->vi_ih);
42926 }
42927
42928 -static struct item_operations direct_ops = {
42929 +static const struct item_operations direct_ops = {
42930 .bytes_number = direct_bytes_number,
42931 .decrement_key = direct_decrement_key,
42932 .is_left_mergeable = direct_is_left_mergeable,
42933 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
42934 vi->vi_index, vi->vi_type, vi->vi_ih);
42935 }
42936
42937 -static struct item_operations indirect_ops = {
42938 +static const struct item_operations indirect_ops = {
42939 .bytes_number = indirect_bytes_number,
42940 .decrement_key = indirect_decrement_key,
42941 .is_left_mergeable = indirect_is_left_mergeable,
42942 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
42943 printk("\n");
42944 }
42945
42946 -static struct item_operations direntry_ops = {
42947 +static const struct item_operations direntry_ops = {
42948 .bytes_number = direntry_bytes_number,
42949 .decrement_key = direntry_decrement_key,
42950 .is_left_mergeable = direntry_is_left_mergeable,
42951 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
42952 "Invalid item type observed, run fsck ASAP");
42953 }
42954
42955 -static struct item_operations errcatch_ops = {
42956 +static const struct item_operations errcatch_ops = {
42957 errcatch_bytes_number,
42958 errcatch_decrement_key,
42959 errcatch_is_left_mergeable,
42960 @@ -746,7 +746,7 @@ static struct item_operations errcatch_o
42961 #error Item types must use disk-format assigned values.
42962 #endif
42963
42964 -struct item_operations *item_ops[TYPE_ANY + 1] = {
42965 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
42966 &stat_data_ops,
42967 &indirect_ops,
42968 &direct_ops,
42969 diff -urNp linux-2.6.32.42/fs/reiserfs/journal.c linux-2.6.32.42/fs/reiserfs/journal.c
42970 --- linux-2.6.32.42/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
42971 +++ linux-2.6.32.42/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
42972 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
42973 struct buffer_head *bh;
42974 int i, j;
42975
42976 + pax_track_stack();
42977 +
42978 bh = __getblk(dev, block, bufsize);
42979 if (buffer_uptodate(bh))
42980 return (bh);
42981 diff -urNp linux-2.6.32.42/fs/reiserfs/namei.c linux-2.6.32.42/fs/reiserfs/namei.c
42982 --- linux-2.6.32.42/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
42983 +++ linux-2.6.32.42/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
42984 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
42985 unsigned long savelink = 1;
42986 struct timespec ctime;
42987
42988 + pax_track_stack();
42989 +
42990 /* three balancings: (1) old name removal, (2) new name insertion
42991 and (3) maybe "save" link insertion
42992 stat data updates: (1) old directory,
42993 diff -urNp linux-2.6.32.42/fs/reiserfs/procfs.c linux-2.6.32.42/fs/reiserfs/procfs.c
42994 --- linux-2.6.32.42/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
42995 +++ linux-2.6.32.42/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
42996 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
42997 "SMALL_TAILS " : "NO_TAILS ",
42998 replay_only(sb) ? "REPLAY_ONLY " : "",
42999 convert_reiserfs(sb) ? "CONV " : "",
43000 - atomic_read(&r->s_generation_counter),
43001 + atomic_read_unchecked(&r->s_generation_counter),
43002 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
43003 SF(s_do_balance), SF(s_unneeded_left_neighbor),
43004 SF(s_good_search_by_key_reada), SF(s_bmaps),
43005 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file
43006 struct journal_params *jp = &rs->s_v1.s_journal;
43007 char b[BDEVNAME_SIZE];
43008
43009 + pax_track_stack();
43010 +
43011 seq_printf(m, /* on-disk fields */
43012 "jp_journal_1st_block: \t%i\n"
43013 "jp_journal_dev: \t%s[%x]\n"
43014 diff -urNp linux-2.6.32.42/fs/reiserfs/stree.c linux-2.6.32.42/fs/reiserfs/stree.c
43015 --- linux-2.6.32.42/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
43016 +++ linux-2.6.32.42/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
43017 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
43018 int iter = 0;
43019 #endif
43020
43021 + pax_track_stack();
43022 +
43023 BUG_ON(!th->t_trans_id);
43024
43025 init_tb_struct(th, &s_del_balance, sb, path,
43026 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
43027 int retval;
43028 int quota_cut_bytes = 0;
43029
43030 + pax_track_stack();
43031 +
43032 BUG_ON(!th->t_trans_id);
43033
43034 le_key2cpu_key(&cpu_key, key);
43035 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
43036 int quota_cut_bytes;
43037 loff_t tail_pos = 0;
43038
43039 + pax_track_stack();
43040 +
43041 BUG_ON(!th->t_trans_id);
43042
43043 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
43044 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
43045 int retval;
43046 int fs_gen;
43047
43048 + pax_track_stack();
43049 +
43050 BUG_ON(!th->t_trans_id);
43051
43052 fs_gen = get_generation(inode->i_sb);
43053 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
43054 int fs_gen = 0;
43055 int quota_bytes = 0;
43056
43057 + pax_track_stack();
43058 +
43059 BUG_ON(!th->t_trans_id);
43060
43061 if (inode) { /* Do we count quotas for item? */
43062 diff -urNp linux-2.6.32.42/fs/reiserfs/super.c linux-2.6.32.42/fs/reiserfs/super.c
43063 --- linux-2.6.32.42/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
43064 +++ linux-2.6.32.42/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
43065 @@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
43066 {.option_name = NULL}
43067 };
43068
43069 + pax_track_stack();
43070 +
43071 *blocks = 0;
43072 if (!options || !*options)
43073 /* use default configuration: create tails, journaling on, no
43074 diff -urNp linux-2.6.32.42/fs/select.c linux-2.6.32.42/fs/select.c
43075 --- linux-2.6.32.42/fs/select.c 2011-03-27 14:31:47.000000000 -0400
43076 +++ linux-2.6.32.42/fs/select.c 2011-05-16 21:46:57.000000000 -0400
43077 @@ -20,6 +20,7 @@
43078 #include <linux/module.h>
43079 #include <linux/slab.h>
43080 #include <linux/poll.h>
43081 +#include <linux/security.h>
43082 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
43083 #include <linux/file.h>
43084 #include <linux/fdtable.h>
43085 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
43086 int retval, i, timed_out = 0;
43087 unsigned long slack = 0;
43088
43089 + pax_track_stack();
43090 +
43091 rcu_read_lock();
43092 retval = max_select_fd(n, fds);
43093 rcu_read_unlock();
43094 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
43095 /* Allocate small arguments on the stack to save memory and be faster */
43096 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
43097
43098 + pax_track_stack();
43099 +
43100 ret = -EINVAL;
43101 if (n < 0)
43102 goto out_nofds;
43103 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
43104 struct poll_list *walk = head;
43105 unsigned long todo = nfds;
43106
43107 + pax_track_stack();
43108 +
43109 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
43110 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
43111 return -EINVAL;
43112
43113 diff -urNp linux-2.6.32.42/fs/seq_file.c linux-2.6.32.42/fs/seq_file.c
43114 --- linux-2.6.32.42/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
43115 +++ linux-2.6.32.42/fs/seq_file.c 2011-04-17 15:56:46.000000000 -0400
43116 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
43117 return 0;
43118 }
43119 if (!m->buf) {
43120 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43121 + m->size = PAGE_SIZE;
43122 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43123 if (!m->buf)
43124 return -ENOMEM;
43125 }
43126 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
43127 Eoverflow:
43128 m->op->stop(m, p);
43129 kfree(m->buf);
43130 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43131 + m->size <<= 1;
43132 + m->buf = kmalloc(m->size, GFP_KERNEL);
43133 return !m->buf ? -ENOMEM : -EAGAIN;
43134 }
43135
43136 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
43137 m->version = file->f_version;
43138 /* grab buffer if we didn't have one */
43139 if (!m->buf) {
43140 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43141 + m->size = PAGE_SIZE;
43142 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43143 if (!m->buf)
43144 goto Enomem;
43145 }
43146 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
43147 goto Fill;
43148 m->op->stop(m, p);
43149 kfree(m->buf);
43150 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43151 + m->size <<= 1;
43152 + m->buf = kmalloc(m->size, GFP_KERNEL);
43153 if (!m->buf)
43154 goto Enomem;
43155 m->count = 0;
43156 diff -urNp linux-2.6.32.42/fs/smbfs/symlink.c linux-2.6.32.42/fs/smbfs/symlink.c
43157 --- linux-2.6.32.42/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
43158 +++ linux-2.6.32.42/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
43159 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
43160
43161 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43162 {
43163 - char *s = nd_get_link(nd);
43164 + const char *s = nd_get_link(nd);
43165 if (!IS_ERR(s))
43166 __putname(s);
43167 }
43168 diff -urNp linux-2.6.32.42/fs/splice.c linux-2.6.32.42/fs/splice.c
43169 --- linux-2.6.32.42/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
43170 +++ linux-2.6.32.42/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
43171 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
43172 pipe_lock(pipe);
43173
43174 for (;;) {
43175 - if (!pipe->readers) {
43176 + if (!atomic_read(&pipe->readers)) {
43177 send_sig(SIGPIPE, current, 0);
43178 if (!ret)
43179 ret = -EPIPE;
43180 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
43181 do_wakeup = 0;
43182 }
43183
43184 - pipe->waiting_writers++;
43185 + atomic_inc(&pipe->waiting_writers);
43186 pipe_wait(pipe);
43187 - pipe->waiting_writers--;
43188 + atomic_dec(&pipe->waiting_writers);
43189 }
43190
43191 pipe_unlock(pipe);
43192 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
43193 .spd_release = spd_release_page,
43194 };
43195
43196 + pax_track_stack();
43197 +
43198 index = *ppos >> PAGE_CACHE_SHIFT;
43199 loff = *ppos & ~PAGE_CACHE_MASK;
43200 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43201 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
43202 old_fs = get_fs();
43203 set_fs(get_ds());
43204 /* The cast to a user pointer is valid due to the set_fs() */
43205 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
43206 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
43207 set_fs(old_fs);
43208
43209 return res;
43210 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
43211 old_fs = get_fs();
43212 set_fs(get_ds());
43213 /* The cast to a user pointer is valid due to the set_fs() */
43214 - res = vfs_write(file, (const char __user *)buf, count, &pos);
43215 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
43216 set_fs(old_fs);
43217
43218 return res;
43219 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
43220 .spd_release = spd_release_page,
43221 };
43222
43223 + pax_track_stack();
43224 +
43225 index = *ppos >> PAGE_CACHE_SHIFT;
43226 offset = *ppos & ~PAGE_CACHE_MASK;
43227 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43228 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
43229 goto err;
43230
43231 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
43232 - vec[i].iov_base = (void __user *) page_address(page);
43233 + vec[i].iov_base = (__force void __user *) page_address(page);
43234 vec[i].iov_len = this_len;
43235 pages[i] = page;
43236 spd.nr_pages++;
43237 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
43238 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
43239 {
43240 while (!pipe->nrbufs) {
43241 - if (!pipe->writers)
43242 + if (!atomic_read(&pipe->writers))
43243 return 0;
43244
43245 - if (!pipe->waiting_writers && sd->num_spliced)
43246 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
43247 return 0;
43248
43249 if (sd->flags & SPLICE_F_NONBLOCK)
43250 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
43251 * out of the pipe right after the splice_to_pipe(). So set
43252 * PIPE_READERS appropriately.
43253 */
43254 - pipe->readers = 1;
43255 + atomic_set(&pipe->readers, 1);
43256
43257 current->splice_pipe = pipe;
43258 }
43259 @@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
43260 .spd_release = spd_release_page,
43261 };
43262
43263 + pax_track_stack();
43264 +
43265 pipe = pipe_info(file->f_path.dentry->d_inode);
43266 if (!pipe)
43267 return -EBADF;
43268 @@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
43269 ret = -ERESTARTSYS;
43270 break;
43271 }
43272 - if (!pipe->writers)
43273 + if (!atomic_read(&pipe->writers))
43274 break;
43275 - if (!pipe->waiting_writers) {
43276 + if (!atomic_read(&pipe->waiting_writers)) {
43277 if (flags & SPLICE_F_NONBLOCK) {
43278 ret = -EAGAIN;
43279 break;
43280 @@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
43281 pipe_lock(pipe);
43282
43283 while (pipe->nrbufs >= PIPE_BUFFERS) {
43284 - if (!pipe->readers) {
43285 + if (!atomic_read(&pipe->readers)) {
43286 send_sig(SIGPIPE, current, 0);
43287 ret = -EPIPE;
43288 break;
43289 @@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
43290 ret = -ERESTARTSYS;
43291 break;
43292 }
43293 - pipe->waiting_writers++;
43294 + atomic_inc(&pipe->waiting_writers);
43295 pipe_wait(pipe);
43296 - pipe->waiting_writers--;
43297 + atomic_dec(&pipe->waiting_writers);
43298 }
43299
43300 pipe_unlock(pipe);
43301 @@ -1785,14 +1791,14 @@ retry:
43302 pipe_double_lock(ipipe, opipe);
43303
43304 do {
43305 - if (!opipe->readers) {
43306 + if (!atomic_read(&opipe->readers)) {
43307 send_sig(SIGPIPE, current, 0);
43308 if (!ret)
43309 ret = -EPIPE;
43310 break;
43311 }
43312
43313 - if (!ipipe->nrbufs && !ipipe->writers)
43314 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
43315 break;
43316
43317 /*
43318 @@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
43319 pipe_double_lock(ipipe, opipe);
43320
43321 do {
43322 - if (!opipe->readers) {
43323 + if (!atomic_read(&opipe->readers)) {
43324 send_sig(SIGPIPE, current, 0);
43325 if (!ret)
43326 ret = -EPIPE;
43327 @@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
43328 * return EAGAIN if we have the potential of some data in the
43329 * future, otherwise just return 0
43330 */
43331 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
43332 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
43333 ret = -EAGAIN;
43334
43335 pipe_unlock(ipipe);
43336 diff -urNp linux-2.6.32.42/fs/sysfs/file.c linux-2.6.32.42/fs/sysfs/file.c
43337 --- linux-2.6.32.42/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
43338 +++ linux-2.6.32.42/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
43339 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
43340
43341 struct sysfs_open_dirent {
43342 atomic_t refcnt;
43343 - atomic_t event;
43344 + atomic_unchecked_t event;
43345 wait_queue_head_t poll;
43346 struct list_head buffers; /* goes through sysfs_buffer.list */
43347 };
43348 @@ -53,7 +53,7 @@ struct sysfs_buffer {
43349 size_t count;
43350 loff_t pos;
43351 char * page;
43352 - struct sysfs_ops * ops;
43353 + const struct sysfs_ops * ops;
43354 struct mutex mutex;
43355 int needs_read_fill;
43356 int event;
43357 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
43358 {
43359 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
43360 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43361 - struct sysfs_ops * ops = buffer->ops;
43362 + const struct sysfs_ops * ops = buffer->ops;
43363 int ret = 0;
43364 ssize_t count;
43365
43366 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
43367 if (!sysfs_get_active_two(attr_sd))
43368 return -ENODEV;
43369
43370 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
43371 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
43372 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
43373
43374 sysfs_put_active_two(attr_sd);
43375 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
43376 {
43377 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
43378 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43379 - struct sysfs_ops * ops = buffer->ops;
43380 + const struct sysfs_ops * ops = buffer->ops;
43381 int rc;
43382
43383 /* need attr_sd for attr and ops, its parent for kobj */
43384 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
43385 return -ENOMEM;
43386
43387 atomic_set(&new_od->refcnt, 0);
43388 - atomic_set(&new_od->event, 1);
43389 + atomic_set_unchecked(&new_od->event, 1);
43390 init_waitqueue_head(&new_od->poll);
43391 INIT_LIST_HEAD(&new_od->buffers);
43392 goto retry;
43393 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
43394 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
43395 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43396 struct sysfs_buffer *buffer;
43397 - struct sysfs_ops *ops;
43398 + const struct sysfs_ops *ops;
43399 int error = -EACCES;
43400 char *p;
43401
43402 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
43403
43404 sysfs_put_active_two(attr_sd);
43405
43406 - if (buffer->event != atomic_read(&od->event))
43407 + if (buffer->event != atomic_read_unchecked(&od->event))
43408 goto trigger;
43409
43410 return DEFAULT_POLLMASK;
43411 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
43412
43413 od = sd->s_attr.open;
43414 if (od) {
43415 - atomic_inc(&od->event);
43416 + atomic_inc_unchecked(&od->event);
43417 wake_up_interruptible(&od->poll);
43418 }
43419
43420 diff -urNp linux-2.6.32.42/fs/sysfs/mount.c linux-2.6.32.42/fs/sysfs/mount.c
43421 --- linux-2.6.32.42/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
43422 +++ linux-2.6.32.42/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
43423 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
43424 .s_name = "",
43425 .s_count = ATOMIC_INIT(1),
43426 .s_flags = SYSFS_DIR,
43427 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43428 + .s_mode = S_IFDIR | S_IRWXU,
43429 +#else
43430 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43431 +#endif
43432 .s_ino = 1,
43433 };
43434
43435 diff -urNp linux-2.6.32.42/fs/sysfs/symlink.c linux-2.6.32.42/fs/sysfs/symlink.c
43436 --- linux-2.6.32.42/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
43437 +++ linux-2.6.32.42/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
43438 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
43439
43440 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
43441 {
43442 - char *page = nd_get_link(nd);
43443 + const char *page = nd_get_link(nd);
43444 if (!IS_ERR(page))
43445 free_page((unsigned long)page);
43446 }
43447 diff -urNp linux-2.6.32.42/fs/udf/balloc.c linux-2.6.32.42/fs/udf/balloc.c
43448 --- linux-2.6.32.42/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
43449 +++ linux-2.6.32.42/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
43450 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
43451
43452 mutex_lock(&sbi->s_alloc_mutex);
43453 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
43454 - if (bloc->logicalBlockNum < 0 ||
43455 - (bloc->logicalBlockNum + count) >
43456 - partmap->s_partition_len) {
43457 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
43458 udf_debug("%d < %d || %d + %d > %d\n",
43459 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
43460 count, partmap->s_partition_len);
43461 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
43462
43463 mutex_lock(&sbi->s_alloc_mutex);
43464 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
43465 - if (bloc->logicalBlockNum < 0 ||
43466 - (bloc->logicalBlockNum + count) >
43467 - partmap->s_partition_len) {
43468 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
43469 udf_debug("%d < %d || %d + %d > %d\n",
43470 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
43471 partmap->s_partition_len);
43472 diff -urNp linux-2.6.32.42/fs/udf/inode.c linux-2.6.32.42/fs/udf/inode.c
43473 --- linux-2.6.32.42/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
43474 +++ linux-2.6.32.42/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
43475 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
43476 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
43477 int lastblock = 0;
43478
43479 + pax_track_stack();
43480 +
43481 prev_epos.offset = udf_file_entry_alloc_offset(inode);
43482 prev_epos.block = iinfo->i_location;
43483 prev_epos.bh = NULL;
43484 diff -urNp linux-2.6.32.42/fs/udf/misc.c linux-2.6.32.42/fs/udf/misc.c
43485 --- linux-2.6.32.42/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
43486 +++ linux-2.6.32.42/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
43487 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
43488
43489 u8 udf_tag_checksum(const struct tag *t)
43490 {
43491 - u8 *data = (u8 *)t;
43492 + const u8 *data = (const u8 *)t;
43493 u8 checksum = 0;
43494 int i;
43495 for (i = 0; i < sizeof(struct tag); ++i)
43496 diff -urNp linux-2.6.32.42/fs/utimes.c linux-2.6.32.42/fs/utimes.c
43497 --- linux-2.6.32.42/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
43498 +++ linux-2.6.32.42/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
43499 @@ -1,6 +1,7 @@
43500 #include <linux/compiler.h>
43501 #include <linux/file.h>
43502 #include <linux/fs.h>
43503 +#include <linux/security.h>
43504 #include <linux/linkage.h>
43505 #include <linux/mount.h>
43506 #include <linux/namei.h>
43507 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
43508 goto mnt_drop_write_and_out;
43509 }
43510 }
43511 +
43512 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
43513 + error = -EACCES;
43514 + goto mnt_drop_write_and_out;
43515 + }
43516 +
43517 mutex_lock(&inode->i_mutex);
43518 error = notify_change(path->dentry, &newattrs);
43519 mutex_unlock(&inode->i_mutex);
43520 diff -urNp linux-2.6.32.42/fs/xattr_acl.c linux-2.6.32.42/fs/xattr_acl.c
43521 --- linux-2.6.32.42/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
43522 +++ linux-2.6.32.42/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
43523 @@ -17,8 +17,8 @@
43524 struct posix_acl *
43525 posix_acl_from_xattr(const void *value, size_t size)
43526 {
43527 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
43528 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
43529 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
43530 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
43531 int count;
43532 struct posix_acl *acl;
43533 struct posix_acl_entry *acl_e;
43534 diff -urNp linux-2.6.32.42/fs/xattr.c linux-2.6.32.42/fs/xattr.c
43535 --- linux-2.6.32.42/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
43536 +++ linux-2.6.32.42/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
43537 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
43538 * Extended attribute SET operations
43539 */
43540 static long
43541 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
43542 +setxattr(struct path *path, const char __user *name, const void __user *value,
43543 size_t size, int flags)
43544 {
43545 int error;
43546 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
43547 return PTR_ERR(kvalue);
43548 }
43549
43550 - error = vfs_setxattr(d, kname, kvalue, size, flags);
43551 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
43552 + error = -EACCES;
43553 + goto out;
43554 + }
43555 +
43556 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
43557 +out:
43558 kfree(kvalue);
43559 return error;
43560 }
43561 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
43562 return error;
43563 error = mnt_want_write(path.mnt);
43564 if (!error) {
43565 - error = setxattr(path.dentry, name, value, size, flags);
43566 + error = setxattr(&path, name, value, size, flags);
43567 mnt_drop_write(path.mnt);
43568 }
43569 path_put(&path);
43570 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
43571 return error;
43572 error = mnt_want_write(path.mnt);
43573 if (!error) {
43574 - error = setxattr(path.dentry, name, value, size, flags);
43575 + error = setxattr(&path, name, value, size, flags);
43576 mnt_drop_write(path.mnt);
43577 }
43578 path_put(&path);
43579 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
43580 const void __user *,value, size_t, size, int, flags)
43581 {
43582 struct file *f;
43583 - struct dentry *dentry;
43584 int error = -EBADF;
43585
43586 f = fget(fd);
43587 if (!f)
43588 return error;
43589 - dentry = f->f_path.dentry;
43590 - audit_inode(NULL, dentry);
43591 + audit_inode(NULL, f->f_path.dentry);
43592 error = mnt_want_write_file(f);
43593 if (!error) {
43594 - error = setxattr(dentry, name, value, size, flags);
43595 + error = setxattr(&f->f_path, name, value, size, flags);
43596 mnt_drop_write(f->f_path.mnt);
43597 }
43598 fput(f);
43599 diff -urNp linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c
43600 --- linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
43601 +++ linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
43602 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
43603 xfs_fsop_geom_t fsgeo;
43604 int error;
43605
43606 + memset(&fsgeo, 0, sizeof(fsgeo));
43607 error = xfs_fs_geometry(mp, &fsgeo, 3);
43608 if (error)
43609 return -error;
43610 diff -urNp linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c
43611 --- linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
43612 +++ linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
43613 @@ -134,7 +134,7 @@ xfs_find_handle(
43614 }
43615
43616 error = -EFAULT;
43617 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
43618 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
43619 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
43620 goto out_put;
43621
43622 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
43623 if (IS_ERR(dentry))
43624 return PTR_ERR(dentry);
43625
43626 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
43627 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
43628 if (!kbuf)
43629 goto out_dput;
43630
43631 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
43632 xfs_mount_t *mp,
43633 void __user *arg)
43634 {
43635 - xfs_fsop_geom_t fsgeo;
43636 + xfs_fsop_geom_t fsgeo;
43637 int error;
43638
43639 error = xfs_fs_geometry(mp, &fsgeo, 3);
43640 diff -urNp linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c
43641 --- linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
43642 +++ linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
43643 @@ -468,7 +468,7 @@ xfs_vn_put_link(
43644 struct nameidata *nd,
43645 void *p)
43646 {
43647 - char *s = nd_get_link(nd);
43648 + const char *s = nd_get_link(nd);
43649
43650 if (!IS_ERR(s))
43651 kfree(s);
43652 diff -urNp linux-2.6.32.42/fs/xfs/xfs_bmap.c linux-2.6.32.42/fs/xfs/xfs_bmap.c
43653 --- linux-2.6.32.42/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
43654 +++ linux-2.6.32.42/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
43655 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
43656 int nmap,
43657 int ret_nmap);
43658 #else
43659 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
43660 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
43661 #endif /* DEBUG */
43662
43663 #if defined(XFS_RW_TRACE)
43664 diff -urNp linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c
43665 --- linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
43666 +++ linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
43667 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
43668 }
43669
43670 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
43671 - if (filldir(dirent, sfep->name, sfep->namelen,
43672 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
43673 + char name[sfep->namelen];
43674 + memcpy(name, sfep->name, sfep->namelen);
43675 + if (filldir(dirent, name, sfep->namelen,
43676 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
43677 + *offset = off & 0x7fffffff;
43678 + return 0;
43679 + }
43680 + } else if (filldir(dirent, sfep->name, sfep->namelen,
43681 off & 0x7fffffff, ino, DT_UNKNOWN)) {
43682 *offset = off & 0x7fffffff;
43683 return 0;
43684 diff -urNp linux-2.6.32.42/grsecurity/gracl_alloc.c linux-2.6.32.42/grsecurity/gracl_alloc.c
43685 --- linux-2.6.32.42/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
43686 +++ linux-2.6.32.42/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
43687 @@ -0,0 +1,105 @@
43688 +#include <linux/kernel.h>
43689 +#include <linux/mm.h>
43690 +#include <linux/slab.h>
43691 +#include <linux/vmalloc.h>
43692 +#include <linux/gracl.h>
43693 +#include <linux/grsecurity.h>
43694 +
43695 +static unsigned long alloc_stack_next = 1;
43696 +static unsigned long alloc_stack_size = 1;
43697 +static void **alloc_stack;
43698 +
43699 +static __inline__ int
43700 +alloc_pop(void)
43701 +{
43702 + if (alloc_stack_next == 1)
43703 + return 0;
43704 +
43705 + kfree(alloc_stack[alloc_stack_next - 2]);
43706 +
43707 + alloc_stack_next--;
43708 +
43709 + return 1;
43710 +}
43711 +
43712 +static __inline__ int
43713 +alloc_push(void *buf)
43714 +{
43715 + if (alloc_stack_next >= alloc_stack_size)
43716 + return 1;
43717 +
43718 + alloc_stack[alloc_stack_next - 1] = buf;
43719 +
43720 + alloc_stack_next++;
43721 +
43722 + return 0;
43723 +}
43724 +
43725 +void *
43726 +acl_alloc(unsigned long len)
43727 +{
43728 + void *ret = NULL;
43729 +
43730 + if (!len || len > PAGE_SIZE)
43731 + goto out;
43732 +
43733 + ret = kmalloc(len, GFP_KERNEL);
43734 +
43735 + if (ret) {
43736 + if (alloc_push(ret)) {
43737 + kfree(ret);
43738 + ret = NULL;
43739 + }
43740 + }
43741 +
43742 +out:
43743 + return ret;
43744 +}
43745 +
43746 +void *
43747 +acl_alloc_num(unsigned long num, unsigned long len)
43748 +{
43749 + if (!len || (num > (PAGE_SIZE / len)))
43750 + return NULL;
43751 +
43752 + return acl_alloc(num * len);
43753 +}
43754 +
43755 +void
43756 +acl_free_all(void)
43757 +{
43758 + if (gr_acl_is_enabled() || !alloc_stack)
43759 + return;
43760 +
43761 + while (alloc_pop()) ;
43762 +
43763 + if (alloc_stack) {
43764 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
43765 + kfree(alloc_stack);
43766 + else
43767 + vfree(alloc_stack);
43768 + }
43769 +
43770 + alloc_stack = NULL;
43771 + alloc_stack_size = 1;
43772 + alloc_stack_next = 1;
43773 +
43774 + return;
43775 +}
43776 +
43777 +int
43778 +acl_alloc_stack_init(unsigned long size)
43779 +{
43780 + if ((size * sizeof (void *)) <= PAGE_SIZE)
43781 + alloc_stack =
43782 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
43783 + else
43784 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
43785 +
43786 + alloc_stack_size = size;
43787 +
43788 + if (!alloc_stack)
43789 + return 0;
43790 + else
43791 + return 1;
43792 +}
43793 diff -urNp linux-2.6.32.42/grsecurity/gracl.c linux-2.6.32.42/grsecurity/gracl.c
43794 --- linux-2.6.32.42/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
43795 +++ linux-2.6.32.42/grsecurity/gracl.c 2011-06-11 16:24:26.000000000 -0400
43796 @@ -0,0 +1,4085 @@
43797 +#include <linux/kernel.h>
43798 +#include <linux/module.h>
43799 +#include <linux/sched.h>
43800 +#include <linux/mm.h>
43801 +#include <linux/file.h>
43802 +#include <linux/fs.h>
43803 +#include <linux/namei.h>
43804 +#include <linux/mount.h>
43805 +#include <linux/tty.h>
43806 +#include <linux/proc_fs.h>
43807 +#include <linux/smp_lock.h>
43808 +#include <linux/slab.h>
43809 +#include <linux/vmalloc.h>
43810 +#include <linux/types.h>
43811 +#include <linux/sysctl.h>
43812 +#include <linux/netdevice.h>
43813 +#include <linux/ptrace.h>
43814 +#include <linux/gracl.h>
43815 +#include <linux/gralloc.h>
43816 +#include <linux/grsecurity.h>
43817 +#include <linux/grinternal.h>
43818 +#include <linux/pid_namespace.h>
43819 +#include <linux/fdtable.h>
43820 +#include <linux/percpu.h>
43821 +
43822 +#include <asm/uaccess.h>
43823 +#include <asm/errno.h>
43824 +#include <asm/mman.h>
43825 +
43826 +static struct acl_role_db acl_role_set;
43827 +static struct name_db name_set;
43828 +static struct inodev_db inodev_set;
43829 +
43830 +/* for keeping track of userspace pointers used for subjects, so we
43831 + can share references in the kernel as well
43832 +*/
43833 +
43834 +static struct dentry *real_root;
43835 +static struct vfsmount *real_root_mnt;
43836 +
43837 +static struct acl_subj_map_db subj_map_set;
43838 +
43839 +static struct acl_role_label *default_role;
43840 +
43841 +static struct acl_role_label *role_list;
43842 +
43843 +static u16 acl_sp_role_value;
43844 +
43845 +extern char *gr_shared_page[4];
43846 +static DEFINE_MUTEX(gr_dev_mutex);
43847 +DEFINE_RWLOCK(gr_inode_lock);
43848 +
43849 +struct gr_arg *gr_usermode;
43850 +
43851 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
43852 +
43853 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
43854 +extern void gr_clear_learn_entries(void);
43855 +
43856 +#ifdef CONFIG_GRKERNSEC_RESLOG
43857 +extern void gr_log_resource(const struct task_struct *task,
43858 + const int res, const unsigned long wanted, const int gt);
43859 +#endif
43860 +
43861 +unsigned char *gr_system_salt;
43862 +unsigned char *gr_system_sum;
43863 +
43864 +static struct sprole_pw **acl_special_roles = NULL;
43865 +static __u16 num_sprole_pws = 0;
43866 +
43867 +static struct acl_role_label *kernel_role = NULL;
43868 +
43869 +static unsigned int gr_auth_attempts = 0;
43870 +static unsigned long gr_auth_expires = 0UL;
43871 +
43872 +#ifdef CONFIG_NET
43873 +extern struct vfsmount *sock_mnt;
43874 +#endif
43875 +extern struct vfsmount *pipe_mnt;
43876 +extern struct vfsmount *shm_mnt;
43877 +#ifdef CONFIG_HUGETLBFS
43878 +extern struct vfsmount *hugetlbfs_vfsmount;
43879 +#endif
43880 +
43881 +static struct acl_object_label *fakefs_obj_rw;
43882 +static struct acl_object_label *fakefs_obj_rwx;
43883 +
43884 +extern int gr_init_uidset(void);
43885 +extern void gr_free_uidset(void);
43886 +extern void gr_remove_uid(uid_t uid);
43887 +extern int gr_find_uid(uid_t uid);
43888 +
43889 +__inline__ int
43890 +gr_acl_is_enabled(void)
43891 +{
43892 + return (gr_status & GR_READY);
43893 +}
43894 +
43895 +#ifdef CONFIG_BTRFS_FS
43896 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
43897 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
43898 +#endif
43899 +
43900 +static inline dev_t __get_dev(const struct dentry *dentry)
43901 +{
43902 +#ifdef CONFIG_BTRFS_FS
43903 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
43904 + return get_btrfs_dev_from_inode(dentry->d_inode);
43905 + else
43906 +#endif
43907 + return dentry->d_inode->i_sb->s_dev;
43908 +}
43909 +
43910 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
43911 +{
43912 + return __get_dev(dentry);
43913 +}
43914 +
43915 +static char gr_task_roletype_to_char(struct task_struct *task)
43916 +{
43917 + switch (task->role->roletype &
43918 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
43919 + GR_ROLE_SPECIAL)) {
43920 + case GR_ROLE_DEFAULT:
43921 + return 'D';
43922 + case GR_ROLE_USER:
43923 + return 'U';
43924 + case GR_ROLE_GROUP:
43925 + return 'G';
43926 + case GR_ROLE_SPECIAL:
43927 + return 'S';
43928 + }
43929 +
43930 + return 'X';
43931 +}
43932 +
43933 +char gr_roletype_to_char(void)
43934 +{
43935 + return gr_task_roletype_to_char(current);
43936 +}
43937 +
43938 +__inline__ int
43939 +gr_acl_tpe_check(void)
43940 +{
43941 + if (unlikely(!(gr_status & GR_READY)))
43942 + return 0;
43943 + if (current->role->roletype & GR_ROLE_TPE)
43944 + return 1;
43945 + else
43946 + return 0;
43947 +}
43948 +
43949 +int
43950 +gr_handle_rawio(const struct inode *inode)
43951 +{
43952 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
43953 + if (inode && S_ISBLK(inode->i_mode) &&
43954 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
43955 + !capable(CAP_SYS_RAWIO))
43956 + return 1;
43957 +#endif
43958 + return 0;
43959 +}
43960 +
43961 +static int
43962 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
43963 +{
43964 + if (likely(lena != lenb))
43965 + return 0;
43966 +
43967 + return !memcmp(a, b, lena);
43968 +}
43969 +
43970 +/* this must be called with vfsmount_lock and dcache_lock held */
43971 +
43972 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
43973 + struct dentry *root, struct vfsmount *rootmnt,
43974 + char *buffer, int buflen)
43975 +{
43976 + char * end = buffer+buflen;
43977 + char * retval;
43978 + int namelen;
43979 +
43980 + *--end = '\0';
43981 + buflen--;
43982 +
43983 + if (buflen < 1)
43984 + goto Elong;
43985 + /* Get '/' right */
43986 + retval = end-1;
43987 + *retval = '/';
43988 +
43989 + for (;;) {
43990 + struct dentry * parent;
43991 +
43992 + if (dentry == root && vfsmnt == rootmnt)
43993 + break;
43994 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
43995 + /* Global root? */
43996 + if (vfsmnt->mnt_parent == vfsmnt)
43997 + goto global_root;
43998 + dentry = vfsmnt->mnt_mountpoint;
43999 + vfsmnt = vfsmnt->mnt_parent;
44000 + continue;
44001 + }
44002 + parent = dentry->d_parent;
44003 + prefetch(parent);
44004 + namelen = dentry->d_name.len;
44005 + buflen -= namelen + 1;
44006 + if (buflen < 0)
44007 + goto Elong;
44008 + end -= namelen;
44009 + memcpy(end, dentry->d_name.name, namelen);
44010 + *--end = '/';
44011 + retval = end;
44012 + dentry = parent;
44013 + }
44014 +
44015 +out:
44016 + return retval;
44017 +
44018 +global_root:
44019 + namelen = dentry->d_name.len;
44020 + buflen -= namelen;
44021 + if (buflen < 0)
44022 + goto Elong;
44023 + retval -= namelen-1; /* hit the slash */
44024 + memcpy(retval, dentry->d_name.name, namelen);
44025 + goto out;
44026 +Elong:
44027 + retval = ERR_PTR(-ENAMETOOLONG);
44028 + goto out;
44029 +}
44030 +
44031 +static char *
44032 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
44033 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
44034 +{
44035 + char *retval;
44036 +
44037 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
44038 + if (unlikely(IS_ERR(retval)))
44039 + retval = strcpy(buf, "<path too long>");
44040 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
44041 + retval[1] = '\0';
44042 +
44043 + return retval;
44044 +}
44045 +
44046 +static char *
44047 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44048 + char *buf, int buflen)
44049 +{
44050 + char *res;
44051 +
44052 + /* we can use real_root, real_root_mnt, because this is only called
44053 + by the RBAC system */
44054 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
44055 +
44056 + return res;
44057 +}
44058 +
44059 +static char *
44060 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44061 + char *buf, int buflen)
44062 +{
44063 + char *res;
44064 + struct dentry *root;
44065 + struct vfsmount *rootmnt;
44066 + struct task_struct *reaper = &init_task;
44067 +
44068 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
44069 + read_lock(&reaper->fs->lock);
44070 + root = dget(reaper->fs->root.dentry);
44071 + rootmnt = mntget(reaper->fs->root.mnt);
44072 + read_unlock(&reaper->fs->lock);
44073 +
44074 + spin_lock(&dcache_lock);
44075 + spin_lock(&vfsmount_lock);
44076 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
44077 + spin_unlock(&vfsmount_lock);
44078 + spin_unlock(&dcache_lock);
44079 +
44080 + dput(root);
44081 + mntput(rootmnt);
44082 + return res;
44083 +}
44084 +
44085 +static char *
44086 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
44087 +{
44088 + char *ret;
44089 + spin_lock(&dcache_lock);
44090 + spin_lock(&vfsmount_lock);
44091 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44092 + PAGE_SIZE);
44093 + spin_unlock(&vfsmount_lock);
44094 + spin_unlock(&dcache_lock);
44095 + return ret;
44096 +}
44097 +
44098 +char *
44099 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
44100 +{
44101 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44102 + PAGE_SIZE);
44103 +}
44104 +
44105 +char *
44106 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
44107 +{
44108 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
44109 + PAGE_SIZE);
44110 +}
44111 +
44112 +char *
44113 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
44114 +{
44115 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
44116 + PAGE_SIZE);
44117 +}
44118 +
44119 +char *
44120 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
44121 +{
44122 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
44123 + PAGE_SIZE);
44124 +}
44125 +
44126 +char *
44127 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
44128 +{
44129 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
44130 + PAGE_SIZE);
44131 +}
44132 +
44133 +__inline__ __u32
44134 +to_gr_audit(const __u32 reqmode)
44135 +{
44136 + /* masks off auditable permission flags, then shifts them to create
44137 + auditing flags, and adds the special case of append auditing if
44138 + we're requesting write */
44139 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
44140 +}
44141 +
44142 +struct acl_subject_label *
44143 +lookup_subject_map(const struct acl_subject_label *userp)
44144 +{
44145 + unsigned int index = shash(userp, subj_map_set.s_size);
44146 + struct subject_map *match;
44147 +
44148 + match = subj_map_set.s_hash[index];
44149 +
44150 + while (match && match->user != userp)
44151 + match = match->next;
44152 +
44153 + if (match != NULL)
44154 + return match->kernel;
44155 + else
44156 + return NULL;
44157 +}
44158 +
44159 +static void
44160 +insert_subj_map_entry(struct subject_map *subjmap)
44161 +{
44162 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
44163 + struct subject_map **curr;
44164 +
44165 + subjmap->prev = NULL;
44166 +
44167 + curr = &subj_map_set.s_hash[index];
44168 + if (*curr != NULL)
44169 + (*curr)->prev = subjmap;
44170 +
44171 + subjmap->next = *curr;
44172 + *curr = subjmap;
44173 +
44174 + return;
44175 +}
44176 +
44177 +static struct acl_role_label *
44178 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
44179 + const gid_t gid)
44180 +{
44181 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
44182 + struct acl_role_label *match;
44183 + struct role_allowed_ip *ipp;
44184 + unsigned int x;
44185 + u32 curr_ip = task->signal->curr_ip;
44186 +
44187 + task->signal->saved_ip = curr_ip;
44188 +
44189 + match = acl_role_set.r_hash[index];
44190 +
44191 + while (match) {
44192 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
44193 + for (x = 0; x < match->domain_child_num; x++) {
44194 + if (match->domain_children[x] == uid)
44195 + goto found;
44196 + }
44197 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
44198 + break;
44199 + match = match->next;
44200 + }
44201 +found:
44202 + if (match == NULL) {
44203 + try_group:
44204 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
44205 + match = acl_role_set.r_hash[index];
44206 +
44207 + while (match) {
44208 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
44209 + for (x = 0; x < match->domain_child_num; x++) {
44210 + if (match->domain_children[x] == gid)
44211 + goto found2;
44212 + }
44213 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
44214 + break;
44215 + match = match->next;
44216 + }
44217 +found2:
44218 + if (match == NULL)
44219 + match = default_role;
44220 + if (match->allowed_ips == NULL)
44221 + return match;
44222 + else {
44223 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44224 + if (likely
44225 + ((ntohl(curr_ip) & ipp->netmask) ==
44226 + (ntohl(ipp->addr) & ipp->netmask)))
44227 + return match;
44228 + }
44229 + match = default_role;
44230 + }
44231 + } else if (match->allowed_ips == NULL) {
44232 + return match;
44233 + } else {
44234 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44235 + if (likely
44236 + ((ntohl(curr_ip) & ipp->netmask) ==
44237 + (ntohl(ipp->addr) & ipp->netmask)))
44238 + return match;
44239 + }
44240 + goto try_group;
44241 + }
44242 +
44243 + return match;
44244 +}
44245 +
44246 +struct acl_subject_label *
44247 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
44248 + const struct acl_role_label *role)
44249 +{
44250 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44251 + struct acl_subject_label *match;
44252 +
44253 + match = role->subj_hash[index];
44254 +
44255 + while (match && (match->inode != ino || match->device != dev ||
44256 + (match->mode & GR_DELETED))) {
44257 + match = match->next;
44258 + }
44259 +
44260 + if (match && !(match->mode & GR_DELETED))
44261 + return match;
44262 + else
44263 + return NULL;
44264 +}
44265 +
44266 +struct acl_subject_label *
44267 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
44268 + const struct acl_role_label *role)
44269 +{
44270 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44271 + struct acl_subject_label *match;
44272 +
44273 + match = role->subj_hash[index];
44274 +
44275 + while (match && (match->inode != ino || match->device != dev ||
44276 + !(match->mode & GR_DELETED))) {
44277 + match = match->next;
44278 + }
44279 +
44280 + if (match && (match->mode & GR_DELETED))
44281 + return match;
44282 + else
44283 + return NULL;
44284 +}
44285 +
44286 +static struct acl_object_label *
44287 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
44288 + const struct acl_subject_label *subj)
44289 +{
44290 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44291 + struct acl_object_label *match;
44292 +
44293 + match = subj->obj_hash[index];
44294 +
44295 + while (match && (match->inode != ino || match->device != dev ||
44296 + (match->mode & GR_DELETED))) {
44297 + match = match->next;
44298 + }
44299 +
44300 + if (match && !(match->mode & GR_DELETED))
44301 + return match;
44302 + else
44303 + return NULL;
44304 +}
44305 +
44306 +static struct acl_object_label *
44307 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
44308 + const struct acl_subject_label *subj)
44309 +{
44310 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44311 + struct acl_object_label *match;
44312 +
44313 + match = subj->obj_hash[index];
44314 +
44315 + while (match && (match->inode != ino || match->device != dev ||
44316 + !(match->mode & GR_DELETED))) {
44317 + match = match->next;
44318 + }
44319 +
44320 + if (match && (match->mode & GR_DELETED))
44321 + return match;
44322 +
44323 + match = subj->obj_hash[index];
44324 +
44325 + while (match && (match->inode != ino || match->device != dev ||
44326 + (match->mode & GR_DELETED))) {
44327 + match = match->next;
44328 + }
44329 +
44330 + if (match && !(match->mode & GR_DELETED))
44331 + return match;
44332 + else
44333 + return NULL;
44334 +}
44335 +
44336 +static struct name_entry *
44337 +lookup_name_entry(const char *name)
44338 +{
44339 + unsigned int len = strlen(name);
44340 + unsigned int key = full_name_hash(name, len);
44341 + unsigned int index = key % name_set.n_size;
44342 + struct name_entry *match;
44343 +
44344 + match = name_set.n_hash[index];
44345 +
44346 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
44347 + match = match->next;
44348 +
44349 + return match;
44350 +}
44351 +
44352 +static struct name_entry *
44353 +lookup_name_entry_create(const char *name)
44354 +{
44355 + unsigned int len = strlen(name);
44356 + unsigned int key = full_name_hash(name, len);
44357 + unsigned int index = key % name_set.n_size;
44358 + struct name_entry *match;
44359 +
44360 + match = name_set.n_hash[index];
44361 +
44362 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44363 + !match->deleted))
44364 + match = match->next;
44365 +
44366 + if (match && match->deleted)
44367 + return match;
44368 +
44369 + match = name_set.n_hash[index];
44370 +
44371 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44372 + match->deleted))
44373 + match = match->next;
44374 +
44375 + if (match && !match->deleted)
44376 + return match;
44377 + else
44378 + return NULL;
44379 +}
44380 +
44381 +static struct inodev_entry *
44382 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
44383 +{
44384 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
44385 + struct inodev_entry *match;
44386 +
44387 + match = inodev_set.i_hash[index];
44388 +
44389 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
44390 + match = match->next;
44391 +
44392 + return match;
44393 +}
44394 +
44395 +static void
44396 +insert_inodev_entry(struct inodev_entry *entry)
44397 +{
44398 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
44399 + inodev_set.i_size);
44400 + struct inodev_entry **curr;
44401 +
44402 + entry->prev = NULL;
44403 +
44404 + curr = &inodev_set.i_hash[index];
44405 + if (*curr != NULL)
44406 + (*curr)->prev = entry;
44407 +
44408 + entry->next = *curr;
44409 + *curr = entry;
44410 +
44411 + return;
44412 +}
44413 +
44414 +static void
44415 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
44416 +{
44417 + unsigned int index =
44418 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
44419 + struct acl_role_label **curr;
44420 + struct acl_role_label *tmp;
44421 +
44422 + curr = &acl_role_set.r_hash[index];
44423 +
44424 + /* if role was already inserted due to domains and already has
44425 + a role in the same bucket as it attached, then we need to
44426 + combine these two buckets
44427 + */
44428 + if (role->next) {
44429 + tmp = role->next;
44430 + while (tmp->next)
44431 + tmp = tmp->next;
44432 + tmp->next = *curr;
44433 + } else
44434 + role->next = *curr;
44435 + *curr = role;
44436 +
44437 + return;
44438 +}
44439 +
44440 +static void
44441 +insert_acl_role_label(struct acl_role_label *role)
44442 +{
44443 + int i;
44444 +
44445 + if (role_list == NULL) {
44446 + role_list = role;
44447 + role->prev = NULL;
44448 + } else {
44449 + role->prev = role_list;
44450 + role_list = role;
44451 + }
44452 +
44453 + /* used for hash chains */
44454 + role->next = NULL;
44455 +
44456 + if (role->roletype & GR_ROLE_DOMAIN) {
44457 + for (i = 0; i < role->domain_child_num; i++)
44458 + __insert_acl_role_label(role, role->domain_children[i]);
44459 + } else
44460 + __insert_acl_role_label(role, role->uidgid);
44461 +}
44462 +
44463 +static int
44464 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
44465 +{
44466 + struct name_entry **curr, *nentry;
44467 + struct inodev_entry *ientry;
44468 + unsigned int len = strlen(name);
44469 + unsigned int key = full_name_hash(name, len);
44470 + unsigned int index = key % name_set.n_size;
44471 +
44472 + curr = &name_set.n_hash[index];
44473 +
44474 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
44475 + curr = &((*curr)->next);
44476 +
44477 + if (*curr != NULL)
44478 + return 1;
44479 +
44480 + nentry = acl_alloc(sizeof (struct name_entry));
44481 + if (nentry == NULL)
44482 + return 0;
44483 + ientry = acl_alloc(sizeof (struct inodev_entry));
44484 + if (ientry == NULL)
44485 + return 0;
44486 + ientry->nentry = nentry;
44487 +
44488 + nentry->key = key;
44489 + nentry->name = name;
44490 + nentry->inode = inode;
44491 + nentry->device = device;
44492 + nentry->len = len;
44493 + nentry->deleted = deleted;
44494 +
44495 + nentry->prev = NULL;
44496 + curr = &name_set.n_hash[index];
44497 + if (*curr != NULL)
44498 + (*curr)->prev = nentry;
44499 + nentry->next = *curr;
44500 + *curr = nentry;
44501 +
44502 + /* insert us into the table searchable by inode/dev */
44503 + insert_inodev_entry(ientry);
44504 +
44505 + return 1;
44506 +}
44507 +
44508 +static void
44509 +insert_acl_obj_label(struct acl_object_label *obj,
44510 + struct acl_subject_label *subj)
44511 +{
44512 + unsigned int index =
44513 + fhash(obj->inode, obj->device, subj->obj_hash_size);
44514 + struct acl_object_label **curr;
44515 +
44516 +
44517 + obj->prev = NULL;
44518 +
44519 + curr = &subj->obj_hash[index];
44520 + if (*curr != NULL)
44521 + (*curr)->prev = obj;
44522 +
44523 + obj->next = *curr;
44524 + *curr = obj;
44525 +
44526 + return;
44527 +}
44528 +
44529 +static void
44530 +insert_acl_subj_label(struct acl_subject_label *obj,
44531 + struct acl_role_label *role)
44532 +{
44533 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
44534 + struct acl_subject_label **curr;
44535 +
44536 + obj->prev = NULL;
44537 +
44538 + curr = &role->subj_hash[index];
44539 + if (*curr != NULL)
44540 + (*curr)->prev = obj;
44541 +
44542 + obj->next = *curr;
44543 + *curr = obj;
44544 +
44545 + return;
44546 +}
44547 +
44548 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
44549 +
44550 +static void *
44551 +create_table(__u32 * len, int elementsize)
44552 +{
44553 + unsigned int table_sizes[] = {
44554 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
44555 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
44556 + 4194301, 8388593, 16777213, 33554393, 67108859
44557 + };
44558 + void *newtable = NULL;
44559 + unsigned int pwr = 0;
44560 +
44561 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
44562 + table_sizes[pwr] <= *len)
44563 + pwr++;
44564 +
44565 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
44566 + return newtable;
44567 +
44568 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
44569 + newtable =
44570 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
44571 + else
44572 + newtable = vmalloc(table_sizes[pwr] * elementsize);
44573 +
44574 + *len = table_sizes[pwr];
44575 +
44576 + return newtable;
44577 +}
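+/* Worked example of the sizing above (illustrative numbers only): for
+   *len = 1500 the loop stops at the first prime greater than *len, so
+   create_table() returns a 2039-bucket table and the resulting load
+   factor is 1500/2039 ~ 0.74, keeping the hash chains short
+ */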
44578 +
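+/* returns 0 on success and 1 on any validation or allocation failure;
+   the caller (gracl_init) treats a non-zero return as -ENOMEM
+ */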
44579 +static int
44580 +init_variables(const struct gr_arg *arg)
44581 +{
44582 + struct task_struct *reaper = &init_task;
44583 + unsigned int stacksize;
44584 +
44585 + subj_map_set.s_size = arg->role_db.num_subjects;
44586 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
44587 + name_set.n_size = arg->role_db.num_objects;
44588 + inodev_set.i_size = arg->role_db.num_objects;
44589 +
44590 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
44591 + !name_set.n_size || !inodev_set.i_size)
44592 + return 1;
44593 +
44594 + if (!gr_init_uidset())
44595 + return 1;
44596 +
44597 + /* set up the stack that holds allocation info */
44598 +
44599 + stacksize = arg->role_db.num_pointers + 5;
44600 +
44601 + if (!acl_alloc_stack_init(stacksize))
44602 + return 1;
44603 +
44604 + /* grab reference for the real root dentry and vfsmount */
44605 + read_lock(&reaper->fs->lock);
44606 + real_root = dget(reaper->fs->root.dentry);
44607 + real_root_mnt = mntget(reaper->fs->root.mnt);
44608 + read_unlock(&reaper->fs->lock);
44609 +
44610 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44611 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
44612 +#endif
44613 +
44614 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
44615 + if (fakefs_obj_rw == NULL)
44616 + return 1;
44617 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
44618 +
44619 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
44620 + if (fakefs_obj_rwx == NULL)
44621 + return 1;
44622 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
44623 +
44624 + subj_map_set.s_hash =
44625 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
44626 + acl_role_set.r_hash =
44627 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
44628 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
44629 + inodev_set.i_hash =
44630 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
44631 +
44632 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
44633 + !name_set.n_hash || !inodev_set.i_hash)
44634 + return 1;
44635 +
44636 + memset(subj_map_set.s_hash, 0,
44637 + sizeof(struct subject_map *) * subj_map_set.s_size);
44638 + memset(acl_role_set.r_hash, 0,
44639 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
44640 + memset(name_set.n_hash, 0,
44641 + sizeof (struct name_entry *) * name_set.n_size);
44642 + memset(inodev_set.i_hash, 0,
44643 + sizeof (struct inodev_entry *) * inodev_set.i_size);
44644 +
44645 + return 0;
44646 +}
44647 +
44648 +/* free information not needed after startup
44649 + currently contains user->kernel pointer mappings for subjects
44650 +*/
44651 +
44652 +static void
44653 +free_init_variables(void)
44654 +{
44655 + __u32 i;
44656 +
44657 + if (subj_map_set.s_hash) {
44658 + for (i = 0; i < subj_map_set.s_size; i++) {
44659 + if (subj_map_set.s_hash[i]) {
44660 + kfree(subj_map_set.s_hash[i]);
44661 + subj_map_set.s_hash[i] = NULL;
44662 + }
44663 + }
44664 +
44665 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
44666 + PAGE_SIZE)
44667 + kfree(subj_map_set.s_hash);
44668 + else
44669 + vfree(subj_map_set.s_hash);
44670 + }
44671 +
44672 + return;
44673 +}
44674 +
44675 +static void
44676 +free_variables(void)
44677 +{
44678 + struct acl_subject_label *s;
44679 + struct acl_role_label *r;
44680 + struct task_struct *task, *task2;
44681 + unsigned int x;
44682 +
44683 + gr_clear_learn_entries();
44684 +
44685 + read_lock(&tasklist_lock);
44686 + do_each_thread(task2, task) {
44687 + task->acl_sp_role = 0;
44688 + task->acl_role_id = 0;
44689 + task->acl = NULL;
44690 + task->role = NULL;
44691 + } while_each_thread(task2, task);
44692 + read_unlock(&tasklist_lock);
44693 +
44694 + /* release the reference to the real root dentry and vfsmount */
44695 + if (real_root)
44696 + dput(real_root);
44697 + real_root = NULL;
44698 + if (real_root_mnt)
44699 + mntput(real_root_mnt);
44700 + real_root_mnt = NULL;
44701 +
44702 + /* free all object hash tables */
44703 +
44704 + FOR_EACH_ROLE_START(r)
44705 + if (r->subj_hash == NULL)
44706 + goto next_role;
44707 + FOR_EACH_SUBJECT_START(r, s, x)
44708 + if (s->obj_hash == NULL)
44709 + break;
44710 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
44711 + kfree(s->obj_hash);
44712 + else
44713 + vfree(s->obj_hash);
44714 + FOR_EACH_SUBJECT_END(s, x)
44715 + FOR_EACH_NESTED_SUBJECT_START(r, s)
44716 + if (s->obj_hash == NULL)
44717 + break;
44718 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
44719 + kfree(s->obj_hash);
44720 + else
44721 + vfree(s->obj_hash);
44722 + FOR_EACH_NESTED_SUBJECT_END(s)
44723 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
44724 + kfree(r->subj_hash);
44725 + else
44726 + vfree(r->subj_hash);
44727 + r->subj_hash = NULL;
44728 +next_role:
44729 + FOR_EACH_ROLE_END(r)
44730 +
44731 + acl_free_all();
44732 +
44733 + if (acl_role_set.r_hash) {
44734 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
44735 + PAGE_SIZE)
44736 + kfree(acl_role_set.r_hash);
44737 + else
44738 + vfree(acl_role_set.r_hash);
44739 + }
44740 + if (name_set.n_hash) {
44741 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
44742 + PAGE_SIZE)
44743 + kfree(name_set.n_hash);
44744 + else
44745 + vfree(name_set.n_hash);
44746 + }
44747 +
44748 + if (inodev_set.i_hash) {
44749 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
44750 + PAGE_SIZE)
44751 + kfree(inodev_set.i_hash);
44752 + else
44753 + vfree(inodev_set.i_hash);
44754 + }
44755 +
44756 + gr_free_uidset();
44757 +
44758 + memset(&name_set, 0, sizeof (struct name_db));
44759 + memset(&inodev_set, 0, sizeof (struct inodev_db));
44760 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
44761 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
44762 +
44763 + default_role = NULL;
44764 + role_list = NULL;
44765 +
44766 + return;
44767 +}
44768 +
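+/* walk the userspace object list via its ->prev links, copying each header
+   only to count the entries; the result sizes the per-subject object hash
+ */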
44769 +static __u32
44770 +count_user_objs(struct acl_object_label *userp)
44771 +{
44772 + struct acl_object_label o_tmp;
44773 + __u32 num = 0;
44774 +
44775 + while (userp) {
44776 + if (copy_from_user(&o_tmp, userp,
44777 + sizeof (struct acl_object_label)))
44778 + break;
44779 +
44780 + userp = o_tmp.prev;
44781 + num++;
44782 + }
44783 +
44784 + return num;
44785 +}
44786 +
44787 +static struct acl_subject_label *
44788 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
44789 +
44790 +static int
44791 +copy_user_glob(struct acl_object_label *obj)
44792 +{
44793 + struct acl_object_label *g_tmp, **guser;
44794 + unsigned int len;
44795 + char *tmp;
44796 +
44797 + if (obj->globbed == NULL)
44798 + return 0;
44799 +
44800 + guser = &obj->globbed;
44801 + while (*guser) {
44802 + g_tmp = (struct acl_object_label *)
44803 + acl_alloc(sizeof (struct acl_object_label));
44804 + if (g_tmp == NULL)
44805 + return -ENOMEM;
44806 +
44807 + if (copy_from_user(g_tmp, *guser,
44808 + sizeof (struct acl_object_label)))
44809 + return -EFAULT;
44810 +
44811 + len = strnlen_user(g_tmp->filename, PATH_MAX);
44812 +
44813 + if (!len || len >= PATH_MAX)
44814 + return -EINVAL;
44815 +
44816 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44817 + return -ENOMEM;
44818 +
44819 + if (copy_from_user(tmp, g_tmp->filename, len))
44820 + return -EFAULT;
44821 + tmp[len-1] = '\0';
44822 + g_tmp->filename = tmp;
44823 +
44824 + *guser = g_tmp;
44825 + guser = &(g_tmp->next);
44826 + }
44827 +
44828 + return 0;
44829 +}
44830 +
44831 +static int
44832 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
44833 + struct acl_role_label *role)
44834 +{
44835 + struct acl_object_label *o_tmp;
44836 + unsigned int len;
44837 + int ret;
44838 + char *tmp;
44839 +
44840 + while (userp) {
44841 + if ((o_tmp = (struct acl_object_label *)
44842 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
44843 + return -ENOMEM;
44844 +
44845 + if (copy_from_user(o_tmp, userp,
44846 + sizeof (struct acl_object_label)))
44847 + return -EFAULT;
44848 +
44849 + userp = o_tmp->prev;
44850 +
44851 + len = strnlen_user(o_tmp->filename, PATH_MAX);
44852 +
44853 + if (!len || len >= PATH_MAX)
44854 + return -EINVAL;
44855 +
44856 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44857 + return -ENOMEM;
44858 +
44859 + if (copy_from_user(tmp, o_tmp->filename, len))
44860 + return -EFAULT;
44861 + tmp[len-1] = '\0';
44862 + o_tmp->filename = tmp;
44863 +
44864 + insert_acl_obj_label(o_tmp, subj);
44865 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
44866 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
44867 + return -ENOMEM;
44868 +
44869 + ret = copy_user_glob(o_tmp);
44870 + if (ret)
44871 + return ret;
44872 +
44873 + if (o_tmp->nested) {
44874 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
44875 + if (IS_ERR(o_tmp->nested))
44876 + return PTR_ERR(o_tmp->nested);
44877 +
44878 + /* insert into nested subject list */
44879 + o_tmp->nested->next = role->hash->first;
44880 + role->hash->first = o_tmp->nested;
44881 + }
44882 + }
44883 +
44884 + return 0;
44885 +}
44886 +
44887 +static __u32
44888 +count_user_subjs(struct acl_subject_label *userp)
44889 +{
44890 + struct acl_subject_label s_tmp;
44891 + __u32 num = 0;
44892 +
44893 + while (userp) {
44894 + if (copy_from_user(&s_tmp, userp,
44895 + sizeof (struct acl_subject_label)))
44896 + break;
44897 +
44898 + userp = s_tmp.prev;
44899 + /* do not count nested subjects against this count, since
44900 + they are not included in the hash table, but are
44901 + attached to objects. We have already counted
44902 + the subjects in userspace for the allocation
44903 + stack
44904 + */
44905 + if (!(s_tmp.mode & GR_NESTED))
44906 + num++;
44907 + }
44908 +
44909 + return num;
44910 +}
44911 +
44912 +static int
44913 +copy_user_allowedips(struct acl_role_label *rolep)
44914 +{
44915 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
44916 +
44917 + ruserip = rolep->allowed_ips;
44918 +
44919 + while (ruserip) {
44920 + rlast = rtmp;
44921 +
44922 + if ((rtmp = (struct role_allowed_ip *)
44923 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
44924 + return -ENOMEM;
44925 +
44926 + if (copy_from_user(rtmp, ruserip,
44927 + sizeof (struct role_allowed_ip)))
44928 + return -EFAULT;
44929 +
44930 + ruserip = rtmp->prev;
44931 +
44932 + if (!rlast) {
44933 + rtmp->prev = NULL;
44934 + rolep->allowed_ips = rtmp;
44935 + } else {
44936 + rlast->next = rtmp;
44937 + rtmp->prev = rlast;
44938 + }
44939 +
44940 + if (!ruserip)
44941 + rtmp->next = NULL;
44942 + }
44943 +
44944 + return 0;
44945 +}
44946 +
44947 +static int
44948 +copy_user_transitions(struct acl_role_label *rolep)
44949 +{
44950 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
44951 +
44952 + unsigned int len;
44953 + char *tmp;
44954 +
44955 + rusertp = rolep->transitions;
44956 +
44957 + while (rusertp) {
44958 + rlast = rtmp;
44959 +
44960 + if ((rtmp = (struct role_transition *)
44961 + acl_alloc(sizeof (struct role_transition))) == NULL)
44962 + return -ENOMEM;
44963 +
44964 + if (copy_from_user(rtmp, rusertp,
44965 + sizeof (struct role_transition)))
44966 + return -EFAULT;
44967 +
44968 + rusertp = rtmp->prev;
44969 +
44970 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
44971 +
44972 + if (!len || len >= GR_SPROLE_LEN)
44973 + return -EINVAL;
44974 +
44975 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44976 + return -ENOMEM;
44977 +
44978 + if (copy_from_user(tmp, rtmp->rolename, len))
44979 + return -EFAULT;
44980 + tmp[len-1] = '\0';
44981 + rtmp->rolename = tmp;
44982 +
44983 + if (!rlast) {
44984 + rtmp->prev = NULL;
44985 + rolep->transitions = rtmp;
44986 + } else {
44987 + rlast->next = rtmp;
44988 + rtmp->prev = rlast;
44989 + }
44990 +
44991 + if (!rusertp)
44992 + rtmp->next = NULL;
44993 + }
44994 +
44995 + return 0;
44996 +}
44997 +
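+/* copy one subject and everything hanging off it (filename, uid/gid
+   transition tables, object hash, nested subjects, ip acls) from userspace;
+   memoised through the subject_map so a subject referenced more than once
+   is only copied once
+ */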
44998 +static struct acl_subject_label *
44999 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
45000 +{
45001 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
45002 + unsigned int len;
45003 + char *tmp;
45004 + __u32 num_objs;
45005 + struct acl_ip_label **i_tmp, *i_utmp2;
45006 + struct gr_hash_struct ghash;
45007 + struct subject_map *subjmap;
45008 + unsigned int i_num;
45009 + int err;
45010 +
45011 + s_tmp = lookup_subject_map(userp);
45012 +
45013 + /* we've already copied this subject into the kernel, just return
45014 + the reference to it, and don't copy it over again
45015 + */
45016 + if (s_tmp)
45017 + return(s_tmp);
45018 +
45019 + if ((s_tmp = (struct acl_subject_label *)
45020 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
45021 + return ERR_PTR(-ENOMEM);
45022 +
45023 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
45024 + if (subjmap == NULL)
45025 + return ERR_PTR(-ENOMEM);
45026 +
45027 + subjmap->user = userp;
45028 + subjmap->kernel = s_tmp;
45029 + insert_subj_map_entry(subjmap);
45030 +
45031 + if (copy_from_user(s_tmp, userp,
45032 + sizeof (struct acl_subject_label)))
45033 + return ERR_PTR(-EFAULT);
45034 +
45035 + len = strnlen_user(s_tmp->filename, PATH_MAX);
45036 +
45037 + if (!len || len >= PATH_MAX)
45038 + return ERR_PTR(-EINVAL);
45039 +
45040 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45041 + return ERR_PTR(-ENOMEM);
45042 +
45043 + if (copy_from_user(tmp, s_tmp->filename, len))
45044 + return ERR_PTR(-EFAULT);
45045 + tmp[len-1] = '\0';
45046 + s_tmp->filename = tmp;
45047 +
45048 + if (!strcmp(s_tmp->filename, "/"))
45049 + role->root_label = s_tmp;
45050 +
45051 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
45052 + return ERR_PTR(-EFAULT);
45053 +
45054 + /* copy user and group transition tables */
45055 +
45056 + if (s_tmp->user_trans_num) {
45057 + uid_t *uidlist;
45058 +
45059 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
45060 + if (uidlist == NULL)
45061 + return ERR_PTR(-ENOMEM);
45062 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
45063 + return ERR_PTR(-EFAULT);
45064 +
45065 + s_tmp->user_transitions = uidlist;
45066 + }
45067 +
45068 + if (s_tmp->group_trans_num) {
45069 + gid_t *gidlist;
45070 +
45071 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
45072 + if (gidlist == NULL)
45073 + return ERR_PTR(-ENOMEM);
45074 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
45075 + return ERR_PTR(-EFAULT);
45076 +
45077 + s_tmp->group_transitions = gidlist;
45078 + }
45079 +
45080 + /* set up object hash table */
45081 + num_objs = count_user_objs(ghash.first);
45082 +
45083 + s_tmp->obj_hash_size = num_objs;
45084 + s_tmp->obj_hash =
45085 + (struct acl_object_label **)
45086 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
45087 +
45088 + if (!s_tmp->obj_hash)
45089 + return ERR_PTR(-ENOMEM);
45090 +
45091 + memset(s_tmp->obj_hash, 0,
45092 + s_tmp->obj_hash_size *
45093 + sizeof (struct acl_object_label *));
45094 +
45095 + /* add in objects */
45096 + err = copy_user_objs(ghash.first, s_tmp, role);
45097 +
45098 + if (err)
45099 + return ERR_PTR(err);
45100 +
45101 + /* set pointer for parent subject */
45102 + if (s_tmp->parent_subject) {
45103 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
45104 +
45105 + if (IS_ERR(s_tmp2))
45106 + return s_tmp2;
45107 +
45108 + s_tmp->parent_subject = s_tmp2;
45109 + }
45110 +
45111 + /* add in ip acls */
45112 +
45113 + if (!s_tmp->ip_num) {
45114 + s_tmp->ips = NULL;
45115 + goto insert;
45116 + }
45117 +
45118 + i_tmp =
45119 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
45120 + sizeof (struct acl_ip_label *));
45121 +
45122 + if (!i_tmp)
45123 + return ERR_PTR(-ENOMEM);
45124 +
45125 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
45126 + *(i_tmp + i_num) =
45127 + (struct acl_ip_label *)
45128 + acl_alloc(sizeof (struct acl_ip_label));
45129 + if (!*(i_tmp + i_num))
45130 + return ERR_PTR(-ENOMEM);
45131 +
45132 + if (copy_from_user
45133 + (&i_utmp2, s_tmp->ips + i_num,
45134 + sizeof (struct acl_ip_label *)))
45135 + return ERR_PTR(-EFAULT);
45136 +
45137 + if (copy_from_user
45138 + (*(i_tmp + i_num), i_utmp2,
45139 + sizeof (struct acl_ip_label)))
45140 + return ERR_PTR(-EFAULT);
45141 +
45142 + if ((*(i_tmp + i_num))->iface == NULL)
45143 + continue;
45144 +
45145 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
45146 + if (!len || len >= IFNAMSIZ)
45147 + return ERR_PTR(-EINVAL);
45148 + tmp = acl_alloc(len);
45149 + if (tmp == NULL)
45150 + return ERR_PTR(-ENOMEM);
45151 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
45152 + return ERR_PTR(-EFAULT);
45153 + (*(i_tmp + i_num))->iface = tmp;
45154 + }
45155 +
45156 + s_tmp->ips = i_tmp;
45157 +
45158 +insert:
45159 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
45160 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
45161 + return ERR_PTR(-ENOMEM);
45162 +
45163 + return s_tmp;
45164 +}
45165 +
45166 +static int
45167 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
45168 +{
45169 + struct acl_subject_label s_pre;
45170 + struct acl_subject_label * ret;
45171 + int err;
45172 +
45173 + while (userp) {
45174 + if (copy_from_user(&s_pre, userp,
45175 + sizeof (struct acl_subject_label)))
45176 + return -EFAULT;
45177 +
45178 + /* do not add nested subjects here, add
45179 + while parsing objects
45180 + */
45181 +
45182 + if (s_pre.mode & GR_NESTED) {
45183 + userp = s_pre.prev;
45184 + continue;
45185 + }
45186 +
45187 + ret = do_copy_user_subj(userp, role);
45188 +
45189 + err = PTR_ERR(ret);
45190 + if (IS_ERR(ret))
45191 + return err;
45192 +
45193 + insert_acl_subj_label(ret, role);
45194 +
45195 + userp = s_pre.prev;
45196 + }
45197 +
45198 + return 0;
45199 +}
45200 +
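+/* top-level copy of the policy from userspace: special role passwords
+   first, then each role together with its allowed IPs, domain members,
+   role transitions and subject/object trees
+ */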
45201 +static int
45202 +copy_user_acl(struct gr_arg *arg)
45203 +{
45204 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
45205 + struct sprole_pw *sptmp;
45206 + struct gr_hash_struct *ghash;
45207 + uid_t *domainlist;
45208 + unsigned int r_num;
45209 + unsigned int len;
45210 + char *tmp;
45211 + int err = 0;
45212 + __u16 i;
45213 + __u32 num_subjs;
45214 +
45215 + /* we need a default and kernel role */
45216 + if (arg->role_db.num_roles < 2)
45217 + return -EINVAL;
45218 +
45219 + /* copy special role authentication info from userspace */
45220 +
45221 + num_sprole_pws = arg->num_sprole_pws;
45222 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
45223 +
45224 + if (!acl_special_roles) {
45225 + err = -ENOMEM;
45226 + goto cleanup;
45227 + }
45228 +
45229 + for (i = 0; i < num_sprole_pws; i++) {
45230 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
45231 + if (!sptmp) {
45232 + err = -ENOMEM;
45233 + goto cleanup;
45234 + }
45235 + if (copy_from_user(sptmp, arg->sprole_pws + i,
45236 + sizeof (struct sprole_pw))) {
45237 + err = -EFAULT;
45238 + goto cleanup;
45239 + }
45240 +
45241 + len =
45242 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
45243 +
45244 + if (!len || len >= GR_SPROLE_LEN) {
45245 + err = -EINVAL;
45246 + goto cleanup;
45247 + }
45248 +
45249 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
45250 + err = -ENOMEM;
45251 + goto cleanup;
45252 + }
45253 +
45254 + if (copy_from_user(tmp, sptmp->rolename, len)) {
45255 + err = -EFAULT;
45256 + goto cleanup;
45257 + }
45258 + tmp[len-1] = '\0';
45259 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45260 + printk(KERN_ALERT "Copying special role %s\n", tmp);
45261 +#endif
45262 + sptmp->rolename = tmp;
45263 + acl_special_roles[i] = sptmp;
45264 + }
45265 +
45266 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
45267 +
45268 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
45269 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
45270 +
45271 + if (!r_tmp) {
45272 + err = -ENOMEM;
45273 + goto cleanup;
45274 + }
45275 +
45276 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
45277 + sizeof (struct acl_role_label *))) {
45278 + err = -EFAULT;
45279 + goto cleanup;
45280 + }
45281 +
45282 + if (copy_from_user(r_tmp, r_utmp2,
45283 + sizeof (struct acl_role_label))) {
45284 + err = -EFAULT;
45285 + goto cleanup;
45286 + }
45287 +
45288 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
45289 +
45290 +		if (!len || len >= GR_SPROLE_LEN) {
45291 + err = -EINVAL;
45292 + goto cleanup;
45293 + }
45294 +
45295 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
45296 + err = -ENOMEM;
45297 + goto cleanup;
45298 + }
45299 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
45300 + err = -EFAULT;
45301 + goto cleanup;
45302 + }
45303 + tmp[len-1] = '\0';
45304 + r_tmp->rolename = tmp;
45305 +
45306 + if (!strcmp(r_tmp->rolename, "default")
45307 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
45308 + default_role = r_tmp;
45309 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
45310 + kernel_role = r_tmp;
45311 + }
45312 +
45313 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
45314 + err = -ENOMEM;
45315 + goto cleanup;
45316 + }
45317 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
45318 + err = -EFAULT;
45319 + goto cleanup;
45320 + }
45321 +
45322 + r_tmp->hash = ghash;
45323 +
45324 + num_subjs = count_user_subjs(r_tmp->hash->first);
45325 +
45326 + r_tmp->subj_hash_size = num_subjs;
45327 + r_tmp->subj_hash =
45328 + (struct acl_subject_label **)
45329 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
45330 +
45331 + if (!r_tmp->subj_hash) {
45332 + err = -ENOMEM;
45333 + goto cleanup;
45334 + }
45335 +
45336 + err = copy_user_allowedips(r_tmp);
45337 + if (err)
45338 + goto cleanup;
45339 +
45340 + /* copy domain info */
45341 + if (r_tmp->domain_children != NULL) {
45342 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
45343 + if (domainlist == NULL) {
45344 + err = -ENOMEM;
45345 + goto cleanup;
45346 + }
45347 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
45348 + err = -EFAULT;
45349 + goto cleanup;
45350 + }
45351 + r_tmp->domain_children = domainlist;
45352 + }
45353 +
45354 + err = copy_user_transitions(r_tmp);
45355 + if (err)
45356 + goto cleanup;
45357 +
45358 + memset(r_tmp->subj_hash, 0,
45359 + r_tmp->subj_hash_size *
45360 + sizeof (struct acl_subject_label *));
45361 +
45362 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
45363 +
45364 + if (err)
45365 + goto cleanup;
45366 +
45367 + /* set nested subject list to null */
45368 + r_tmp->hash->first = NULL;
45369 +
45370 + insert_acl_role_label(r_tmp);
45371 + }
45372 +
45373 + goto return_err;
45374 + cleanup:
45375 + free_variables();
45376 + return_err:
45377 + return err;
45378 +
45379 +}
45380 +
45381 +static int
45382 +gracl_init(struct gr_arg *args)
45383 +{
45384 + int error = 0;
45385 +
45386 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
45387 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
45388 +
45389 + if (init_variables(args)) {
45390 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
45391 + error = -ENOMEM;
45392 + free_variables();
45393 + goto out;
45394 + }
45395 +
45396 + error = copy_user_acl(args);
45397 + free_init_variables();
45398 + if (error) {
45399 + free_variables();
45400 + goto out;
45401 + }
45402 +
45403 + if ((error = gr_set_acls(0))) {
45404 + free_variables();
45405 + goto out;
45406 + }
45407 +
45408 + pax_open_kernel();
45409 + gr_status |= GR_READY;
45410 + pax_close_kernel();
45411 +
45412 + out:
45413 + return error;
45414 +}
45415 +
45416 +/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
45417 +
45418 +static int
45419 +glob_match(const char *p, const char *n)
45420 +{
45421 + char c;
45422 +
45423 + while ((c = *p++) != '\0') {
45424 + switch (c) {
45425 + case '?':
45426 + if (*n == '\0')
45427 + return 1;
45428 + else if (*n == '/')
45429 + return 1;
45430 + break;
45431 + case '\\':
45432 + if (*n != c)
45433 + return 1;
45434 + break;
45435 + case '*':
45436 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
45437 + if (*n == '/')
45438 + return 1;
45439 + else if (c == '?') {
45440 + if (*n == '\0')
45441 + return 1;
45442 + else
45443 + ++n;
45444 + }
45445 + }
45446 + if (c == '\0') {
45447 + return 0;
45448 + } else {
45449 + const char *endp;
45450 +
45451 + if ((endp = strchr(n, '/')) == NULL)
45452 + endp = n + strlen(n);
45453 +
45454 + if (c == '[') {
45455 + for (--p; n < endp; ++n)
45456 + if (!glob_match(p, n))
45457 + return 0;
45458 + } else if (c == '/') {
45459 + while (*n != '\0' && *n != '/')
45460 + ++n;
45461 + if (*n == '/' && !glob_match(p, n + 1))
45462 + return 0;
45463 + } else {
45464 + for (--p; n < endp; ++n)
45465 + if (*n == c && !glob_match(p, n))
45466 + return 0;
45467 + }
45468 +
45469 + return 1;
45470 + }
45471 + case '[':
45472 + {
45473 + int not;
45474 + char cold;
45475 +
45476 + if (*n == '\0' || *n == '/')
45477 + return 1;
45478 +
45479 + not = (*p == '!' || *p == '^');
45480 + if (not)
45481 + ++p;
45482 +
45483 + c = *p++;
45484 + for (;;) {
45485 + unsigned char fn = (unsigned char)*n;
45486 +
45487 + if (c == '\0')
45488 + return 1;
45489 + else {
45490 + if (c == fn)
45491 + goto matched;
45492 + cold = c;
45493 + c = *p++;
45494 +
45495 + if (c == '-' && *p != ']') {
45496 + unsigned char cend = *p++;
45497 +
45498 + if (cend == '\0')
45499 + return 1;
45500 +
45501 + if (cold <= fn && fn <= cend)
45502 + goto matched;
45503 +
45504 + c = *p++;
45505 + }
45506 + }
45507 +
45508 + if (c == ']')
45509 + break;
45510 + }
45511 + if (!not)
45512 + return 1;
45513 + break;
45514 + matched:
45515 + while (c != ']') {
45516 + if (c == '\0')
45517 + return 1;
45518 +
45519 + c = *p++;
45520 + }
45521 + if (not)
45522 + return 1;
45523 + }
45524 + break;
45525 + default:
45526 + if (c != *n)
45527 + return 1;
45528 + }
45529 +
45530 + ++n;
45531 + }
45532 +
45533 + if (*n == '\0')
45534 + return 0;
45535 +
45536 + if (*n == '/')
45537 + return 0;
45538 +
45539 + return 1;
45540 +}
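+/* Illustrative calls (hypothetical file names):
+   glob_match("*.txt", "notes.txt")     returns 0 (match)
+   glob_match("*.txt", "dir/notes.txt") returns 1 (no match; '?' and a
+   non-trailing '*' never match across a '/')
+ */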
45541 +
45542 +static struct acl_object_label *
45543 +chk_glob_label(struct acl_object_label *globbed,
45544 + struct dentry *dentry, struct vfsmount *mnt, char **path)
45545 +{
45546 + struct acl_object_label *tmp;
45547 +
45548 + if (*path == NULL)
45549 + *path = gr_to_filename_nolock(dentry, mnt);
45550 +
45551 + tmp = globbed;
45552 +
45553 + while (tmp) {
45554 + if (!glob_match(tmp->filename, *path))
45555 + return tmp;
45556 + tmp = tmp->next;
45557 + }
45558 +
45559 + return NULL;
45560 +}
45561 +
45562 +static struct acl_object_label *
45563 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45564 + const ino_t curr_ino, const dev_t curr_dev,
45565 + const struct acl_subject_label *subj, char **path, const int checkglob)
45566 +{
45567 + struct acl_subject_label *tmpsubj;
45568 + struct acl_object_label *retval;
45569 + struct acl_object_label *retval2;
45570 +
45571 + tmpsubj = (struct acl_subject_label *) subj;
45572 + read_lock(&gr_inode_lock);
45573 + do {
45574 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
45575 + if (retval) {
45576 + if (checkglob && retval->globbed) {
45577 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
45578 + (struct vfsmount *)orig_mnt, path);
45579 + if (retval2)
45580 + retval = retval2;
45581 + }
45582 + break;
45583 + }
45584 + } while ((tmpsubj = tmpsubj->parent_subject));
45585 + read_unlock(&gr_inode_lock);
45586 +
45587 + return retval;
45588 +}
45589 +
45590 +static __inline__ struct acl_object_label *
45591 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45592 + const struct dentry *curr_dentry,
45593 + const struct acl_subject_label *subj, char **path, const int checkglob)
45594 +{
45595 + int newglob = checkglob;
45596 +
45597 +	/* if we aren't checking a subdirectory of the original path yet, don't do glob
45598 +	   checking, as we don't want a / * rule to match instead of the / object.
45599 +	   Don't do this for create lookups that call this function though, since they're
45600 +	   looking up on the parent and thus need globbing checks on all paths
45601 +	 */
45602 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
45603 + newglob = GR_NO_GLOB;
45604 +
45605 + return __full_lookup(orig_dentry, orig_mnt,
45606 + curr_dentry->d_inode->i_ino,
45607 + __get_dev(curr_dentry), subj, path, newglob);
45608 +}
45609 +
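+/* resolve the object label for a dentry/vfsmount pair: walk from the dentry
+   up through parent directories and mountpoints towards the real root,
+   returning the first (most specific) label found; shared memory, pipes,
+   sockets and private inodes get one of the fake fs labels instead
+ */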
45610 +static struct acl_object_label *
45611 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45612 + const struct acl_subject_label *subj, char *path, const int checkglob)
45613 +{
45614 + struct dentry *dentry = (struct dentry *) l_dentry;
45615 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45616 + struct acl_object_label *retval;
45617 +
45618 + spin_lock(&dcache_lock);
45619 + spin_lock(&vfsmount_lock);
45620 +
45621 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
45622 +#ifdef CONFIG_NET
45623 + mnt == sock_mnt ||
45624 +#endif
45625 +#ifdef CONFIG_HUGETLBFS
45626 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
45627 +#endif
45628 + /* ignore Eric Biederman */
45629 + IS_PRIVATE(l_dentry->d_inode))) {
45630 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
45631 + goto out;
45632 + }
45633 +
45634 + for (;;) {
45635 + if (dentry == real_root && mnt == real_root_mnt)
45636 + break;
45637 +
45638 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45639 + if (mnt->mnt_parent == mnt)
45640 + break;
45641 +
45642 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45643 + if (retval != NULL)
45644 + goto out;
45645 +
45646 + dentry = mnt->mnt_mountpoint;
45647 + mnt = mnt->mnt_parent;
45648 + continue;
45649 + }
45650 +
45651 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45652 + if (retval != NULL)
45653 + goto out;
45654 +
45655 + dentry = dentry->d_parent;
45656 + }
45657 +
45658 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45659 +
45660 + if (retval == NULL)
45661 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
45662 +out:
45663 + spin_unlock(&vfsmount_lock);
45664 + spin_unlock(&dcache_lock);
45665 +
45666 + BUG_ON(retval == NULL);
45667 +
45668 + return retval;
45669 +}
45670 +
45671 +static __inline__ struct acl_object_label *
45672 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45673 + const struct acl_subject_label *subj)
45674 +{
45675 + char *path = NULL;
45676 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
45677 +}
45678 +
45679 +static __inline__ struct acl_object_label *
45680 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45681 + const struct acl_subject_label *subj)
45682 +{
45683 + char *path = NULL;
45684 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
45685 +}
45686 +
45687 +static __inline__ struct acl_object_label *
45688 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45689 + const struct acl_subject_label *subj, char *path)
45690 +{
45691 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
45692 +}
45693 +
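+/* same ancestor walk as __chk_obj_label(), but resolving the subject label
+   for the given role; falls back to the label of the real root if nothing
+   more specific matches
+ */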
45694 +static struct acl_subject_label *
45695 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45696 + const struct acl_role_label *role)
45697 +{
45698 + struct dentry *dentry = (struct dentry *) l_dentry;
45699 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45700 + struct acl_subject_label *retval;
45701 +
45702 + spin_lock(&dcache_lock);
45703 + spin_lock(&vfsmount_lock);
45704 +
45705 + for (;;) {
45706 + if (dentry == real_root && mnt == real_root_mnt)
45707 + break;
45708 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45709 + if (mnt->mnt_parent == mnt)
45710 + break;
45711 +
45712 + read_lock(&gr_inode_lock);
45713 + retval =
45714 + lookup_acl_subj_label(dentry->d_inode->i_ino,
45715 + __get_dev(dentry), role);
45716 + read_unlock(&gr_inode_lock);
45717 + if (retval != NULL)
45718 + goto out;
45719 +
45720 + dentry = mnt->mnt_mountpoint;
45721 + mnt = mnt->mnt_parent;
45722 + continue;
45723 + }
45724 +
45725 + read_lock(&gr_inode_lock);
45726 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
45727 + __get_dev(dentry), role);
45728 + read_unlock(&gr_inode_lock);
45729 + if (retval != NULL)
45730 + goto out;
45731 +
45732 + dentry = dentry->d_parent;
45733 + }
45734 +
45735 + read_lock(&gr_inode_lock);
45736 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
45737 + __get_dev(dentry), role);
45738 + read_unlock(&gr_inode_lock);
45739 +
45740 + if (unlikely(retval == NULL)) {
45741 + read_lock(&gr_inode_lock);
45742 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
45743 + __get_dev(real_root), role);
45744 + read_unlock(&gr_inode_lock);
45745 + }
45746 +out:
45747 + spin_unlock(&vfsmount_lock);
45748 + spin_unlock(&dcache_lock);
45749 +
45750 + BUG_ON(retval == NULL);
45751 +
45752 + return retval;
45753 +}
45754 +
45755 +static void
45756 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
45757 +{
45758 + struct task_struct *task = current;
45759 + const struct cred *cred = current_cred();
45760 +
45761 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
45762 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45763 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45764 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
45765 +
45766 + return;
45767 +}
45768 +
45769 +static void
45770 +gr_log_learn_sysctl(const char *path, const __u32 mode)
45771 +{
45772 + struct task_struct *task = current;
45773 + const struct cred *cred = current_cred();
45774 +
45775 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
45776 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45777 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45778 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
45779 +
45780 + return;
45781 +}
45782 +
45783 +static void
45784 +gr_log_learn_id_change(const char type, const unsigned int real,
45785 + const unsigned int effective, const unsigned int fs)
45786 +{
45787 + struct task_struct *task = current;
45788 + const struct cred *cred = current_cred();
45789 +
45790 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
45791 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45792 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45793 + type, real, effective, fs, &task->signal->saved_ip);
45794 +
45795 + return;
45796 +}
45797 +
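+/* decide whether a hard link may be created: the new link must be creatable
+   under the current subject and must not grant more access (write, exec,
+   setid, ...) to the target than the policy already grants on the existing path
+ */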
45798 +__u32
45799 +gr_check_link(const struct dentry * new_dentry,
45800 + const struct dentry * parent_dentry,
45801 + const struct vfsmount * parent_mnt,
45802 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
45803 +{
45804 + struct acl_object_label *obj;
45805 + __u32 oldmode, newmode;
45806 + __u32 needmode;
45807 +
45808 + if (unlikely(!(gr_status & GR_READY)))
45809 + return (GR_CREATE | GR_LINK);
45810 +
45811 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
45812 + oldmode = obj->mode;
45813 +
45814 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45815 + oldmode |= (GR_CREATE | GR_LINK);
45816 +
45817 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
45818 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
45819 + needmode |= GR_SETID | GR_AUDIT_SETID;
45820 +
45821 + newmode =
45822 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
45823 + oldmode | needmode);
45824 +
45825 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
45826 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
45827 + GR_INHERIT | GR_AUDIT_INHERIT);
45828 +
45829 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
45830 + goto bad;
45831 +
45832 + if ((oldmode & needmode) != needmode)
45833 + goto bad;
45834 +
45835 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
45836 + if ((newmode & needmode) != needmode)
45837 + goto bad;
45838 +
45839 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
45840 + return newmode;
45841 +bad:
45842 + needmode = oldmode;
45843 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
45844 + needmode |= GR_SETID;
45845 +
45846 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
45847 + gr_log_learn(old_dentry, old_mnt, needmode);
45848 + return (GR_CREATE | GR_LINK);
45849 + } else if (newmode & GR_SUPPRESS)
45850 + return GR_SUPPRESS;
45851 + else
45852 + return 0;
45853 +}
45854 +
45855 +__u32
45856 +gr_search_file(const struct dentry * dentry, const __u32 mode,
45857 + const struct vfsmount * mnt)
45858 +{
45859 + __u32 retval = mode;
45860 + struct acl_subject_label *curracl;
45861 + struct acl_object_label *currobj;
45862 +
45863 + if (unlikely(!(gr_status & GR_READY)))
45864 + return (mode & ~GR_AUDITS);
45865 +
45866 + curracl = current->acl;
45867 +
45868 + currobj = chk_obj_label(dentry, mnt, curracl);
45869 + retval = currobj->mode & mode;
45870 +
45871 + /* if we're opening a specified transfer file for writing
45872 + (e.g. /dev/initctl), then transfer our role to init
45873 + */
45874 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
45875 + current->role->roletype & GR_ROLE_PERSIST)) {
45876 + struct task_struct *task = init_pid_ns.child_reaper;
45877 +
45878 + if (task->role != current->role) {
45879 + task->acl_sp_role = 0;
45880 + task->acl_role_id = current->acl_role_id;
45881 + task->role = current->role;
45882 + rcu_read_lock();
45883 + read_lock(&grsec_exec_file_lock);
45884 + gr_apply_subject_to_task(task);
45885 + read_unlock(&grsec_exec_file_lock);
45886 + rcu_read_unlock();
45887 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
45888 + }
45889 + }
45890 +
45891 + if (unlikely
45892 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
45893 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
45894 + __u32 new_mode = mode;
45895 +
45896 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45897 +
45898 + retval = new_mode;
45899 +
45900 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
45901 + new_mode |= GR_INHERIT;
45902 +
45903 + if (!(mode & GR_NOLEARN))
45904 + gr_log_learn(dentry, mnt, new_mode);
45905 + }
45906 +
45907 + return retval;
45908 +}
45909 +
45910 +__u32
45911 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
45912 + const struct vfsmount * mnt, const __u32 mode)
45913 +{
45914 + struct name_entry *match;
45915 + struct acl_object_label *matchpo;
45916 + struct acl_subject_label *curracl;
45917 + char *path;
45918 + __u32 retval;
45919 +
45920 + if (unlikely(!(gr_status & GR_READY)))
45921 + return (mode & ~GR_AUDITS);
45922 +
45923 + preempt_disable();
45924 + path = gr_to_filename_rbac(new_dentry, mnt);
45925 + match = lookup_name_entry_create(path);
45926 +
45927 + if (!match)
45928 + goto check_parent;
45929 +
45930 + curracl = current->acl;
45931 +
45932 + read_lock(&gr_inode_lock);
45933 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
45934 + read_unlock(&gr_inode_lock);
45935 +
45936 + if (matchpo) {
45937 + if ((matchpo->mode & mode) !=
45938 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
45939 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
45940 + __u32 new_mode = mode;
45941 +
45942 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45943 +
45944 + gr_log_learn(new_dentry, mnt, new_mode);
45945 +
45946 + preempt_enable();
45947 + return new_mode;
45948 + }
45949 + preempt_enable();
45950 + return (matchpo->mode & mode);
45951 + }
45952 +
45953 + check_parent:
45954 + curracl = current->acl;
45955 +
45956 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
45957 + retval = matchpo->mode & mode;
45958 +
45959 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
45960 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
45961 + __u32 new_mode = mode;
45962 +
45963 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45964 +
45965 + gr_log_learn(new_dentry, mnt, new_mode);
45966 + preempt_enable();
45967 + return new_mode;
45968 + }
45969 +
45970 + preempt_enable();
45971 + return retval;
45972 +}
45973 +
45974 +int
45975 +gr_check_hidden_task(const struct task_struct *task)
45976 +{
45977 + if (unlikely(!(gr_status & GR_READY)))
45978 + return 0;
45979 +
45980 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
45981 + return 1;
45982 +
45983 + return 0;
45984 +}
45985 +
45986 +int
45987 +gr_check_protected_task(const struct task_struct *task)
45988 +{
45989 + if (unlikely(!(gr_status & GR_READY) || !task))
45990 + return 0;
45991 +
45992 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
45993 + task->acl != current->acl)
45994 + return 1;
45995 +
45996 + return 0;
45997 +}
45998 +
45999 +int
46000 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
46001 +{
46002 + struct task_struct *p;
46003 + int ret = 0;
46004 +
46005 + if (unlikely(!(gr_status & GR_READY) || !pid))
46006 + return ret;
46007 +
46008 + read_lock(&tasklist_lock);
46009 + do_each_pid_task(pid, type, p) {
46010 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46011 + p->acl != current->acl) {
46012 + ret = 1;
46013 + goto out;
46014 + }
46015 + } while_each_pid_task(pid, type, p);
46016 +out:
46017 + read_unlock(&tasklist_lock);
46018 +
46019 + return ret;
46020 +}
46021 +
46022 +void
46023 +gr_copy_label(struct task_struct *tsk)
46024 +{
46025 + tsk->signal->used_accept = 0;
46026 + tsk->acl_sp_role = 0;
46027 + tsk->acl_role_id = current->acl_role_id;
46028 + tsk->acl = current->acl;
46029 + tsk->role = current->role;
46030 + tsk->signal->curr_ip = current->signal->curr_ip;
46031 + tsk->signal->saved_ip = current->signal->saved_ip;
46032 + if (current->exec_file)
46033 + get_file(current->exec_file);
46034 + tsk->exec_file = current->exec_file;
46035 + tsk->is_writable = current->is_writable;
46036 + if (unlikely(current->signal->used_accept)) {
46037 + current->signal->curr_ip = 0;
46038 + current->signal->saved_ip = 0;
46039 + }
46040 +
46041 + return;
46042 +}
46043 +
46044 +static void
46045 +gr_set_proc_res(struct task_struct *task)
46046 +{
46047 + struct acl_subject_label *proc;
46048 + unsigned short i;
46049 +
46050 + proc = task->acl;
46051 +
46052 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
46053 + return;
46054 +
46055 + for (i = 0; i < RLIM_NLIMITS; i++) {
46056 + if (!(proc->resmask & (1 << i)))
46057 + continue;
46058 +
46059 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
46060 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
46061 + }
46062 +
46063 + return;
46064 +}
46065 +
46066 +extern int __gr_process_user_ban(struct user_struct *user);
46067 +
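+/* returns 1 (denying the change) if the target user is banned or if one of
+   the requested real/effective/fs uids is not permitted by the subject's
+   GR_ID_ALLOW/GR_ID_DENY transition list; returns 0 if the change may proceed
+ */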
46068 +int
46069 +gr_check_user_change(int real, int effective, int fs)
46070 +{
46071 + unsigned int i;
46072 + __u16 num;
46073 + uid_t *uidlist;
46074 + int curuid;
46075 + int realok = 0;
46076 + int effectiveok = 0;
46077 + int fsok = 0;
46078 +
46079 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
46080 + struct user_struct *user;
46081 +
46082 + if (real == -1)
46083 + goto skipit;
46084 +
46085 + user = find_user(real);
46086 + if (user == NULL)
46087 + goto skipit;
46088 +
46089 + if (__gr_process_user_ban(user)) {
46090 + /* for find_user */
46091 + free_uid(user);
46092 + return 1;
46093 + }
46094 +
46095 + /* for find_user */
46096 + free_uid(user);
46097 +
46098 +skipit:
46099 +#endif
46100 +
46101 + if (unlikely(!(gr_status & GR_READY)))
46102 + return 0;
46103 +
46104 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46105 + gr_log_learn_id_change('u', real, effective, fs);
46106 +
46107 + num = current->acl->user_trans_num;
46108 + uidlist = current->acl->user_transitions;
46109 +
46110 + if (uidlist == NULL)
46111 + return 0;
46112 +
46113 + if (real == -1)
46114 + realok = 1;
46115 + if (effective == -1)
46116 + effectiveok = 1;
46117 + if (fs == -1)
46118 + fsok = 1;
46119 +
46120 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
46121 + for (i = 0; i < num; i++) {
46122 + curuid = (int)uidlist[i];
46123 + if (real == curuid)
46124 + realok = 1;
46125 + if (effective == curuid)
46126 + effectiveok = 1;
46127 + if (fs == curuid)
46128 + fsok = 1;
46129 + }
46130 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
46131 + for (i = 0; i < num; i++) {
46132 + curuid = (int)uidlist[i];
46133 + if (real == curuid)
46134 + break;
46135 + if (effective == curuid)
46136 + break;
46137 + if (fs == curuid)
46138 + break;
46139 + }
46140 + /* not in deny list */
46141 + if (i == num) {
46142 + realok = 1;
46143 + effectiveok = 1;
46144 + fsok = 1;
46145 + }
46146 + }
46147 +
46148 + if (realok && effectiveok && fsok)
46149 + return 0;
46150 + else {
46151 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46152 + return 1;
46153 + }
46154 +}
46155 +
46156 +int
46157 +gr_check_group_change(int real, int effective, int fs)
46158 +{
46159 + unsigned int i;
46160 + __u16 num;
46161 + gid_t *gidlist;
46162 + int curgid;
46163 + int realok = 0;
46164 + int effectiveok = 0;
46165 + int fsok = 0;
46166 +
46167 + if (unlikely(!(gr_status & GR_READY)))
46168 + return 0;
46169 +
46170 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46171 + gr_log_learn_id_change('g', real, effective, fs);
46172 +
46173 + num = current->acl->group_trans_num;
46174 + gidlist = current->acl->group_transitions;
46175 +
46176 + if (gidlist == NULL)
46177 + return 0;
46178 +
46179 + if (real == -1)
46180 + realok = 1;
46181 + if (effective == -1)
46182 + effectiveok = 1;
46183 + if (fs == -1)
46184 + fsok = 1;
46185 +
46186 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
46187 + for (i = 0; i < num; i++) {
46188 + curgid = (int)gidlist[i];
46189 + if (real == curgid)
46190 + realok = 1;
46191 + if (effective == curgid)
46192 + effectiveok = 1;
46193 + if (fs == curgid)
46194 + fsok = 1;
46195 + }
46196 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
46197 + for (i = 0; i < num; i++) {
46198 + curgid = (int)gidlist[i];
46199 + if (real == curgid)
46200 + break;
46201 + if (effective == curgid)
46202 + break;
46203 + if (fs == curgid)
46204 + break;
46205 + }
46206 + /* not in deny list */
46207 + if (i == num) {
46208 + realok = 1;
46209 + effectiveok = 1;
46210 + fsok = 1;
46211 + }
46212 + }
46213 +
46214 + if (realok && effectiveok && fsok)
46215 + return 0;
46216 + else {
46217 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46218 + return 1;
46219 + }
46220 +}
46221 +
46222 +void
46223 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
46224 +{
46225 + struct acl_role_label *role = task->role;
46226 + struct acl_subject_label *subj = NULL;
46227 + struct acl_object_label *obj;
46228 + struct file *filp;
46229 +
46230 + if (unlikely(!(gr_status & GR_READY)))
46231 + return;
46232 +
46233 + filp = task->exec_file;
46234 +
46235 + /* kernel process, we'll give them the kernel role */
46236 + if (unlikely(!filp)) {
46237 + task->role = kernel_role;
46238 + task->acl = kernel_role->root_label;
46239 + return;
46240 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
46241 + role = lookup_acl_role_label(task, uid, gid);
46242 +
46243 +	/* perform the subject lookup in the possibly new role;
46244 +	   we can use this result below in the case where role == task->role
46245 +	 */
46246 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
46247 +
46248 +	/* if we changed uid/gid but ended up in the same role and are
46249 +	   using inheritance, don't lose the inherited subject:
46250 +	   if the current subject is other than what a normal lookup
46251 +	   would result in, we arrived at it via inheritance, so don't
46252 +	   lose that subject
46253 +	 */
46254 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
46255 + (subj == task->acl)))
46256 + task->acl = subj;
46257 +
46258 + task->role = role;
46259 +
46260 + task->is_writable = 0;
46261 +
46262 + /* ignore additional mmap checks for processes that are writable
46263 + by the default ACL */
46264 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46265 + if (unlikely(obj->mode & GR_WRITE))
46266 + task->is_writable = 1;
46267 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
46268 + if (unlikely(obj->mode & GR_WRITE))
46269 + task->is_writable = 1;
46270 +
46271 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46272 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46273 +#endif
46274 +
46275 + gr_set_proc_res(task);
46276 +
46277 + return;
46278 +}
46279 +
46280 +int
46281 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
46282 + const int unsafe_share)
46283 +{
46284 + struct task_struct *task = current;
46285 + struct acl_subject_label *newacl;
46286 + struct acl_object_label *obj;
46287 + __u32 retmode;
46288 +
46289 + if (unlikely(!(gr_status & GR_READY)))
46290 + return 0;
46291 +
46292 + newacl = chk_subj_label(dentry, mnt, task->role);
46293 +
46294 + task_lock(task);
46295 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
46296 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
46297 + !(task->role->roletype & GR_ROLE_GOD) &&
46298 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
46299 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
46300 + task_unlock(task);
46301 + if (unsafe_share)
46302 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
46303 + else
46304 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
46305 + return -EACCES;
46306 + }
46307 + task_unlock(task);
46308 +
46309 + obj = chk_obj_label(dentry, mnt, task->acl);
46310 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
46311 +
46312 + if (!(task->acl->mode & GR_INHERITLEARN) &&
46313 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
46314 + if (obj->nested)
46315 + task->acl = obj->nested;
46316 + else
46317 + task->acl = newacl;
46318 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
46319 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
46320 +
46321 + task->is_writable = 0;
46322 +
46323 + /* ignore additional mmap checks for processes that are writable
46324 + by the default ACL */
46325 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
46326 + if (unlikely(obj->mode & GR_WRITE))
46327 + task->is_writable = 1;
46328 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
46329 + if (unlikely(obj->mode & GR_WRITE))
46330 + task->is_writable = 1;
46331 +
46332 + gr_set_proc_res(task);
46333 +
46334 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46335 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46336 +#endif
46337 + return 0;
46338 +}
46339 +
46340 +/* always called with valid inodev ptr */
46341 +static void
46342 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
46343 +{
46344 + struct acl_object_label *matchpo;
46345 + struct acl_subject_label *matchps;
46346 + struct acl_subject_label *subj;
46347 + struct acl_role_label *role;
46348 + unsigned int x;
46349 +
46350 + FOR_EACH_ROLE_START(role)
46351 + FOR_EACH_SUBJECT_START(role, subj, x)
46352 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
46353 + matchpo->mode |= GR_DELETED;
46354 + FOR_EACH_SUBJECT_END(subj,x)
46355 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
46356 + if (subj->inode == ino && subj->device == dev)
46357 + subj->mode |= GR_DELETED;
46358 + FOR_EACH_NESTED_SUBJECT_END(subj)
46359 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
46360 + matchps->mode |= GR_DELETED;
46361 + FOR_EACH_ROLE_END(role)
46362 +
46363 + inodev->nentry->deleted = 1;
46364 +
46365 + return;
46366 +}
46367 +
46368 +void
46369 +gr_handle_delete(const ino_t ino, const dev_t dev)
46370 +{
46371 + struct inodev_entry *inodev;
46372 +
46373 + if (unlikely(!(gr_status & GR_READY)))
46374 + return;
46375 +
46376 + write_lock(&gr_inode_lock);
46377 + inodev = lookup_inodev_entry(ino, dev);
46378 + if (inodev != NULL)
46379 + do_handle_delete(inodev, ino, dev);
46380 + write_unlock(&gr_inode_lock);
46381 +
46382 + return;
46383 +}
46384 +
46385 +static void
46386 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
46387 + const ino_t newinode, const dev_t newdevice,
46388 + struct acl_subject_label *subj)
46389 +{
46390 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
46391 + struct acl_object_label *match;
46392 +
46393 + match = subj->obj_hash[index];
46394 +
46395 + while (match && (match->inode != oldinode ||
46396 + match->device != olddevice ||
46397 + !(match->mode & GR_DELETED)))
46398 + match = match->next;
46399 +
46400 + if (match && (match->inode == oldinode)
46401 + && (match->device == olddevice)
46402 + && (match->mode & GR_DELETED)) {
46403 + if (match->prev == NULL) {
46404 + subj->obj_hash[index] = match->next;
46405 + if (match->next != NULL)
46406 + match->next->prev = NULL;
46407 + } else {
46408 + match->prev->next = match->next;
46409 + if (match->next != NULL)
46410 + match->next->prev = match->prev;
46411 + }
46412 + match->prev = NULL;
46413 + match->next = NULL;
46414 + match->inode = newinode;
46415 + match->device = newdevice;
46416 + match->mode &= ~GR_DELETED;
46417 +
46418 + insert_acl_obj_label(match, subj);
46419 + }
46420 +
46421 + return;
46422 +}
46423 +
46424 +static void
46425 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
46426 + const ino_t newinode, const dev_t newdevice,
46427 + struct acl_role_label *role)
46428 +{
46429 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
46430 + struct acl_subject_label *match;
46431 +
46432 + match = role->subj_hash[index];
46433 +
46434 + while (match && (match->inode != oldinode ||
46435 + match->device != olddevice ||
46436 + !(match->mode & GR_DELETED)))
46437 + match = match->next;
46438 +
46439 + if (match && (match->inode == oldinode)
46440 + && (match->device == olddevice)
46441 + && (match->mode & GR_DELETED)) {
46442 + if (match->prev == NULL) {
46443 + role->subj_hash[index] = match->next;
46444 + if (match->next != NULL)
46445 + match->next->prev = NULL;
46446 + } else {
46447 + match->prev->next = match->next;
46448 + if (match->next != NULL)
46449 + match->next->prev = match->prev;
46450 + }
46451 + match->prev = NULL;
46452 + match->next = NULL;
46453 + match->inode = newinode;
46454 + match->device = newdevice;
46455 + match->mode &= ~GR_DELETED;
46456 +
46457 + insert_acl_subj_label(match, role);
46458 + }
46459 +
46460 + return;
46461 +}
46462 +
46463 +static void
46464 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
46465 + const ino_t newinode, const dev_t newdevice)
46466 +{
46467 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
46468 + struct inodev_entry *match;
46469 +
46470 + match = inodev_set.i_hash[index];
46471 +
46472 + while (match && (match->nentry->inode != oldinode ||
46473 + match->nentry->device != olddevice || !match->nentry->deleted))
46474 + match = match->next;
46475 +
46476 + if (match && (match->nentry->inode == oldinode)
46477 + && (match->nentry->device == olddevice) &&
46478 + match->nentry->deleted) {
46479 + if (match->prev == NULL) {
46480 + inodev_set.i_hash[index] = match->next;
46481 + if (match->next != NULL)
46482 + match->next->prev = NULL;
46483 + } else {
46484 + match->prev->next = match->next;
46485 + if (match->next != NULL)
46486 + match->next->prev = match->prev;
46487 + }
46488 + match->prev = NULL;
46489 + match->next = NULL;
46490 + match->nentry->inode = newinode;
46491 + match->nentry->device = newdevice;
46492 + match->nentry->deleted = 0;
46493 +
46494 + insert_inodev_entry(match);
46495 + }
46496 +
46497 + return;
46498 +}
46499 +
46500 +static void
46501 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
46502 + const struct vfsmount *mnt)
46503 +{
46504 + struct acl_subject_label *subj;
46505 + struct acl_role_label *role;
46506 + unsigned int x;
46507 + ino_t inode = dentry->d_inode->i_ino;
46508 + dev_t dev = __get_dev(dentry);
46509 +
46510 + FOR_EACH_ROLE_START(role)
46511 + update_acl_subj_label(matchn->inode, matchn->device,
46512 + inode, dev, role);
46513 +
46514 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
46515 + if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
46516 + subj->inode = inode;
46517 + subj->device = dev;
46518 + }
46519 + FOR_EACH_NESTED_SUBJECT_END(subj)
46520 + FOR_EACH_SUBJECT_START(role, subj, x)
46521 + update_acl_obj_label(matchn->inode, matchn->device,
46522 + inode, dev, subj);
46523 + FOR_EACH_SUBJECT_END(subj,x)
46524 + FOR_EACH_ROLE_END(role)
46525 +
46526 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
46527 +
46528 + return;
46529 +}
46530 +
46531 +void
46532 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
46533 +{
46534 + struct name_entry *matchn;
46535 +
46536 + if (unlikely(!(gr_status & GR_READY)))
46537 + return;
46538 +
46539 + preempt_disable();
46540 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
46541 +
46542 + if (unlikely((unsigned long)matchn)) {
46543 + write_lock(&gr_inode_lock);
46544 + do_handle_create(matchn, dentry, mnt);
46545 + write_unlock(&gr_inode_lock);
46546 + }
46547 + preempt_enable();
46548 +
46549 + return;
46550 +}
46551 +
46552 +void
46553 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
46554 + struct dentry *old_dentry,
46555 + struct dentry *new_dentry,
46556 + struct vfsmount *mnt, const __u8 replace)
46557 +{
46558 + struct name_entry *matchn;
46559 + struct inodev_entry *inodev;
46560 + ino_t oldinode = old_dentry->d_inode->i_ino;
46561 + dev_t olddev = __get_dev(old_dentry);
46562 +
46563 + /* vfs_rename swaps the name and parent link for old_dentry and
46564 + new_dentry
46565 + at this point, old_dentry has the new name, parent link, and inode
46566 + for the renamed file
46567 + if a file is being replaced by a rename, new_dentry has the inode
46568 + and name for the replaced file
46569 + */
46570 +
46571 + if (unlikely(!(gr_status & GR_READY)))
46572 + return;
46573 +
46574 + preempt_disable();
46575 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
46576 +
46577 + /* we wouldn't have to check d_inode if it weren't for
46578 + NFS silly-renaming
46579 + */
46580 +
46581 + write_lock(&gr_inode_lock);
46582 + if (unlikely(replace && new_dentry->d_inode)) {
46583 + ino_t newinode = new_dentry->d_inode->i_ino;
46584 + dev_t newdev = __get_dev(new_dentry);
46585 + inodev = lookup_inodev_entry(newinode, newdev);
46586 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
46587 + do_handle_delete(inodev, newinode, newdev);
46588 + }
46589 +
46590 + inodev = lookup_inodev_entry(oldinode, olddev);
46591 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
46592 + do_handle_delete(inodev, oldinode, olddev);
46593 +
46594 + if (unlikely((unsigned long)matchn))
46595 + do_handle_create(matchn, old_dentry, mnt);
46596 +
46597 + write_unlock(&gr_inode_lock);
46598 + preempt_enable();
46599 +
46600 + return;
46601 +}
46602 +
46603 +static int
46604 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
46605 + unsigned char **sum)
46606 +{
46607 + struct acl_role_label *r;
46608 + struct role_allowed_ip *ipp;
46609 + struct role_transition *trans;
46610 + unsigned int i;
46611 + int found = 0;
46612 + u32 curr_ip = current->signal->curr_ip;
46613 +
46614 + current->signal->saved_ip = curr_ip;
46615 +
46616 + /* check transition table */
46617 +
46618 + for (trans = current->role->transitions; trans; trans = trans->next) {
46619 + if (!strcmp(rolename, trans->rolename)) {
46620 + found = 1;
46621 + break;
46622 + }
46623 + }
46624 +
46625 + if (!found)
46626 + return 0;
46627 +
46628 + /* handle special roles that do not require authentication
46629 + and check ip */
46630 +
46631 + FOR_EACH_ROLE_START(r)
46632 + if (!strcmp(rolename, r->rolename) &&
46633 + (r->roletype & GR_ROLE_SPECIAL)) {
46634 + found = 0;
46635 + if (r->allowed_ips != NULL) {
46636 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
46637 + if ((ntohl(curr_ip) & ipp->netmask) ==
46638 + (ntohl(ipp->addr) & ipp->netmask))
46639 + found = 1;
46640 + }
46641 + } else
46642 + found = 2;
46643 + if (!found)
46644 + return 0;
46645 +
46646 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
46647 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
46648 + *salt = NULL;
46649 + *sum = NULL;
46650 + return 1;
46651 + }
46652 + }
46653 + FOR_EACH_ROLE_END(r)
46654 +
46655 + for (i = 0; i < num_sprole_pws; i++) {
46656 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
46657 + *salt = acl_special_roles[i]->salt;
46658 + *sum = acl_special_roles[i]->sum;
46659 + return 1;
46660 + }
46661 + }
46662 +
46663 + return 0;
46664 +}
46665 +
46666 +static void
46667 +assign_special_role(char *rolename)
46668 +{
46669 + struct acl_object_label *obj;
46670 + struct acl_role_label *r;
46671 + struct acl_role_label *assigned = NULL;
46672 + struct task_struct *tsk;
46673 + struct file *filp;
46674 +
46675 + FOR_EACH_ROLE_START(r)
46676 + if (!strcmp(rolename, r->rolename) &&
46677 + (r->roletype & GR_ROLE_SPECIAL)) {
46678 + assigned = r;
46679 + break;
46680 + }
46681 + FOR_EACH_ROLE_END(r)
46682 +
46683 + if (!assigned)
46684 + return;
46685 +
46686 + read_lock(&tasklist_lock);
46687 + read_lock(&grsec_exec_file_lock);
46688 +
46689 + tsk = current->real_parent;
46690 + if (tsk == NULL)
46691 + goto out_unlock;
46692 +
46693 + filp = tsk->exec_file;
46694 + if (filp == NULL)
46695 + goto out_unlock;
46696 +
46697 + tsk->is_writable = 0;
46698 +
46699 + tsk->acl_sp_role = 1;
46700 + tsk->acl_role_id = ++acl_sp_role_value;
46701 + tsk->role = assigned;
46702 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
46703 +
46704 + /* ignore additional mmap checks for processes that are writable
46705 + by the default ACL */
46706 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46707 + if (unlikely(obj->mode & GR_WRITE))
46708 + tsk->is_writable = 1;
46709 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
46710 + if (unlikely(obj->mode & GR_WRITE))
46711 + tsk->is_writable = 1;
46712 +
46713 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46714 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
46715 +#endif
46716 +
46717 +out_unlock:
46718 + read_unlock(&grsec_exec_file_lock);
46719 + read_unlock(&tasklist_lock);
46720 + return;
46721 +}
46722 +
46723 +int gr_check_secure_terminal(struct task_struct *task)
46724 +{
46725 + struct task_struct *p, *p2, *p3;
46726 + struct files_struct *files;
46727 + struct fdtable *fdt;
46728 + struct file *our_file = NULL, *file;
46729 + int i;
46730 +
46731 + if (task->signal->tty == NULL)
46732 + return 1;
46733 +
46734 + files = get_files_struct(task);
46735 + if (files != NULL) {
46736 + rcu_read_lock();
46737 + fdt = files_fdtable(files);
46738 + for (i=0; i < fdt->max_fds; i++) {
46739 + file = fcheck_files(files, i);
46740 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
46741 + get_file(file);
46742 + our_file = file;
46743 + }
46744 + }
46745 + rcu_read_unlock();
46746 + put_files_struct(files);
46747 + }
46748 +
46749 + if (our_file == NULL)
46750 + return 1;
46751 +
46752 + read_lock(&tasklist_lock);
46753 + do_each_thread(p2, p) {
46754 + files = get_files_struct(p);
46755 + if (files == NULL ||
46756 + (p->signal && p->signal->tty == task->signal->tty)) {
46757 + if (files != NULL)
46758 + put_files_struct(files);
46759 + continue;
46760 + }
46761 + rcu_read_lock();
46762 + fdt = files_fdtable(files);
46763 + for (i=0; i < fdt->max_fds; i++) {
46764 + file = fcheck_files(files, i);
46765 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
46766 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
46767 + p3 = task;
46768 + while (p3->pid > 0) {
46769 + if (p3 == p)
46770 + break;
46771 + p3 = p3->real_parent;
46772 + }
46773 + if (p3 == p)
46774 + break;
46775 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
46776 + gr_handle_alertkill(p);
46777 + rcu_read_unlock();
46778 + put_files_struct(files);
46779 + read_unlock(&tasklist_lock);
46780 + fput(our_file);
46781 + return 0;
46782 + }
46783 + }
46784 + rcu_read_unlock();
46785 + put_files_struct(files);
46786 + } while_each_thread(p2, p);
46787 + read_unlock(&tasklist_lock);
46788 +
46789 + fput(our_file);
46790 + return 1;
46791 +}
46792 +
46793 +ssize_t
46794 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
46795 +{
46796 + struct gr_arg_wrapper uwrap;
46797 + unsigned char *sprole_salt = NULL;
46798 + unsigned char *sprole_sum = NULL;
46799 + int error = sizeof (struct gr_arg_wrapper);
46800 + int error2 = 0;
46801 +
46802 + mutex_lock(&gr_dev_mutex);
46803 +
46804 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
46805 + error = -EPERM;
46806 + goto out;
46807 + }
46808 +
46809 + if (count != sizeof (struct gr_arg_wrapper)) {
46810 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
46811 + error = -EINVAL;
46812 + goto out;
46813 + }
46814 +
46815 +
46816 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
46817 + gr_auth_expires = 0;
46818 + gr_auth_attempts = 0;
46819 + }
46820 +
46821 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
46822 + error = -EFAULT;
46823 + goto out;
46824 + }
46825 +
46826 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
46827 + error = -EINVAL;
46828 + goto out;
46829 + }
46830 +
46831 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
46832 + error = -EFAULT;
46833 + goto out;
46834 + }
46835 +
46836 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
46837 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
46838 + time_after(gr_auth_expires, get_seconds())) {
46839 + error = -EBUSY;
46840 + goto out;
46841 + }
46842 +
46843 + /* if a non-root user is trying to do anything other than use a
46844 + special role, do not attempt authentication and do not count the
46845 + attempt towards authentication locking
46846 + */
46847 +
46848 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
46849 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
46850 + current_uid()) {
46851 + error = -EPERM;
46852 + goto out;
46853 + }
46854 +
46855 + /* ensure pw and special role name are null terminated */
46856 +
46857 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
46858 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
46859 +
46860 + /* Okay.
46861 + * We have enough of the argument structure (we have yet
46862 + * to copy_from_user the tables themselves). Copy the tables
46863 + * only if we need them, i.e. for loading operations. */
46864 +
46865 + switch (gr_usermode->mode) {
46866 + case GR_STATUS:
46867 + if (gr_status & GR_READY) {
46868 + error = 1;
46869 + if (!gr_check_secure_terminal(current))
46870 + error = 3;
46871 + } else
46872 + error = 2;
46873 + goto out;
46874 + case GR_SHUTDOWN:
46875 + if ((gr_status & GR_READY)
46876 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46877 + pax_open_kernel();
46878 + gr_status &= ~GR_READY;
46879 + pax_close_kernel();
46880 +
46881 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
46882 + free_variables();
46883 + memset(gr_usermode, 0, sizeof (struct gr_arg));
46884 + memset(gr_system_salt, 0, GR_SALT_LEN);
46885 + memset(gr_system_sum, 0, GR_SHA_LEN);
46886 + } else if (gr_status & GR_READY) {
46887 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
46888 + error = -EPERM;
46889 + } else {
46890 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
46891 + error = -EAGAIN;
46892 + }
46893 + break;
46894 + case GR_ENABLE:
46895 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
46896 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
46897 + else {
46898 + if (gr_status & GR_READY)
46899 + error = -EAGAIN;
46900 + else
46901 + error = error2;
46902 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
46903 + }
46904 + break;
46905 + case GR_RELOAD:
46906 + if (!(gr_status & GR_READY)) {
46907 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
46908 + error = -EAGAIN;
46909 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46910 + lock_kernel();
46911 +
46912 + pax_open_kernel();
46913 + gr_status &= ~GR_READY;
46914 + pax_close_kernel();
46915 +
46916 + free_variables();
46917 + if (!(error2 = gracl_init(gr_usermode))) {
46918 + unlock_kernel();
46919 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
46920 + } else {
46921 + unlock_kernel();
46922 + error = error2;
46923 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
46924 + }
46925 + } else {
46926 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
46927 + error = -EPERM;
46928 + }
46929 + break;
46930 + case GR_SEGVMOD:
46931 + if (unlikely(!(gr_status & GR_READY))) {
46932 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
46933 + error = -EAGAIN;
46934 + break;
46935 + }
46936 +
46937 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46938 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
46939 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
46940 + struct acl_subject_label *segvacl;
46941 + segvacl =
46942 + lookup_acl_subj_label(gr_usermode->segv_inode,
46943 + gr_usermode->segv_device,
46944 + current->role);
46945 + if (segvacl) {
46946 + segvacl->crashes = 0;
46947 + segvacl->expires = 0;
46948 + }
46949 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
46950 + gr_remove_uid(gr_usermode->segv_uid);
46951 + }
46952 + } else {
46953 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
46954 + error = -EPERM;
46955 + }
46956 + break;
46957 + case GR_SPROLE:
46958 + case GR_SPROLEPAM:
46959 + if (unlikely(!(gr_status & GR_READY))) {
46960 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
46961 + error = -EAGAIN;
46962 + break;
46963 + }
46964 +
46965 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
46966 + current->role->expires = 0;
46967 + current->role->auth_attempts = 0;
46968 + }
46969 +
46970 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
46971 + time_after(current->role->expires, get_seconds())) {
46972 + error = -EBUSY;
46973 + goto out;
46974 + }
46975 +
46976 + if (lookup_special_role_auth
46977 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
46978 + && ((!sprole_salt && !sprole_sum)
46979 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
46980 + char *p = "";
46981 + assign_special_role(gr_usermode->sp_role);
46982 + read_lock(&tasklist_lock);
46983 + if (current->real_parent)
46984 + p = current->real_parent->role->rolename;
46985 + read_unlock(&tasklist_lock);
46986 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
46987 + p, acl_sp_role_value);
46988 + } else {
46989 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
46990 + error = -EPERM;
46991 + if(!(current->role->auth_attempts++))
46992 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
46993 +
46994 + goto out;
46995 + }
46996 + break;
46997 + case GR_UNSPROLE:
46998 + if (unlikely(!(gr_status & GR_READY))) {
46999 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
47000 + error = -EAGAIN;
47001 + break;
47002 + }
47003 +
47004 + if (current->role->roletype & GR_ROLE_SPECIAL) {
47005 + char *p = "";
47006 + int i = 0;
47007 +
47008 + read_lock(&tasklist_lock);
47009 + if (current->real_parent) {
47010 + p = current->real_parent->role->rolename;
47011 + i = current->real_parent->acl_role_id;
47012 + }
47013 + read_unlock(&tasklist_lock);
47014 +
47015 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
47016 + gr_set_acls(1);
47017 + } else {
47018 + error = -EPERM;
47019 + goto out;
47020 + }
47021 + break;
47022 + default:
47023 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
47024 + error = -EINVAL;
47025 + break;
47026 + }
47027 +
47028 + if (error != -EPERM)
47029 + goto out;
47030 +
47031 + if(!(gr_auth_attempts++))
47032 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47033 +
47034 + out:
47035 + mutex_unlock(&gr_dev_mutex);
47036 + return error;
47037 +}
47038 +
47039 +/* must be called with
47040 + rcu_read_lock();
47041 + read_lock(&tasklist_lock);
47042 + read_lock(&grsec_exec_file_lock);
47043 +*/
47044 +int gr_apply_subject_to_task(struct task_struct *task)
47045 +{
47046 + struct acl_object_label *obj;
47047 + char *tmpname;
47048 + struct acl_subject_label *tmpsubj;
47049 + struct file *filp;
47050 + struct name_entry *nmatch;
47051 +
47052 + filp = task->exec_file;
47053 + if (filp == NULL)
47054 + return 0;
47055 +
47056 + /* the following is to apply the correct subject
47057 + on binaries running when the RBAC system
47058 + is enabled, when the binaries have been
47059 + replaced or deleted since their execution
47060 + -----
47061 + when the RBAC system starts, the inode/dev
47062 + from exec_file will be one the RBAC system
47063 + is unaware of. It only knows the inode/dev
47064 + of the present file on disk, or the absence
47065 + of it.
47066 + */
47067 + preempt_disable();
47068 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
47069 +
47070 + nmatch = lookup_name_entry(tmpname);
47071 + preempt_enable();
47072 + tmpsubj = NULL;
47073 + if (nmatch) {
47074 + if (nmatch->deleted)
47075 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
47076 + else
47077 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
47078 + if (tmpsubj != NULL)
47079 + task->acl = tmpsubj;
47080 + }
47081 + if (tmpsubj == NULL)
47082 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
47083 + task->role);
47084 + if (task->acl) {
47085 + struct acl_subject_label *curr;
47086 + curr = task->acl;
47087 +
47088 + task->is_writable = 0;
47089 + /* ignore additional mmap checks for processes that are writable
47090 + by the default ACL */
47091 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47092 + if (unlikely(obj->mode & GR_WRITE))
47093 + task->is_writable = 1;
47094 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47095 + if (unlikely(obj->mode & GR_WRITE))
47096 + task->is_writable = 1;
47097 +
47098 + gr_set_proc_res(task);
47099 +
47100 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47101 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47102 +#endif
47103 + } else {
47104 + return 1;
47105 + }
47106 +
47107 + return 0;
47108 +}
47109 +
47110 +int
47111 +gr_set_acls(const int type)
47112 +{
47113 + struct task_struct *task, *task2;
47114 + struct acl_role_label *role = current->role;
47115 + __u16 acl_role_id = current->acl_role_id;
47116 + const struct cred *cred;
47117 + int ret;
47118 +
47119 + rcu_read_lock();
47120 + read_lock(&tasklist_lock);
47121 + read_lock(&grsec_exec_file_lock);
47122 + do_each_thread(task2, task) {
47123 + /* check to see if we're called from the exit handler,
47124 + if so, only replace ACLs that have inherited the admin
47125 + ACL */
47126 +
47127 + if (type && (task->role != role ||
47128 + task->acl_role_id != acl_role_id))
47129 + continue;
47130 +
47131 + task->acl_role_id = 0;
47132 + task->acl_sp_role = 0;
47133 +
47134 + if (task->exec_file) {
47135 + cred = __task_cred(task);
47136 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
47137 +
47138 + ret = gr_apply_subject_to_task(task);
47139 + if (ret) {
47140 + read_unlock(&grsec_exec_file_lock);
47141 + read_unlock(&tasklist_lock);
47142 + rcu_read_unlock();
47143 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
47144 + return ret;
47145 + }
47146 + } else {
47147 + // it's a kernel process
47148 + task->role = kernel_role;
47149 + task->acl = kernel_role->root_label;
47150 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
47151 + task->acl->mode &= ~GR_PROCFIND;
47152 +#endif
47153 + }
47154 + } while_each_thread(task2, task);
47155 + read_unlock(&grsec_exec_file_lock);
47156 + read_unlock(&tasklist_lock);
47157 + rcu_read_unlock();
47158 +
47159 + return 0;
47160 +}
47161 +
47162 +void
47163 +gr_learn_resource(const struct task_struct *task,
47164 + const int res, const unsigned long wanted, const int gt)
47165 +{
47166 + struct acl_subject_label *acl;
47167 + const struct cred *cred;
47168 +
47169 + if (unlikely((gr_status & GR_READY) &&
47170 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
47171 + goto skip_reslog;
47172 +
47173 +#ifdef CONFIG_GRKERNSEC_RESLOG
47174 + gr_log_resource(task, res, wanted, gt);
47175 +#endif
47176 + skip_reslog:
47177 +
47178 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
47179 + return;
47180 +
47181 + acl = task->acl;
47182 +
47183 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
47184 + !(acl->resmask & (1 << (unsigned short) res))))
47185 + return;
47186 +
47187 + if (wanted >= acl->res[res].rlim_cur) {
47188 + unsigned long res_add;
47189 +
47190 + res_add = wanted;
47191 + switch (res) {
47192 + case RLIMIT_CPU:
47193 + res_add += GR_RLIM_CPU_BUMP;
47194 + break;
47195 + case RLIMIT_FSIZE:
47196 + res_add += GR_RLIM_FSIZE_BUMP;
47197 + break;
47198 + case RLIMIT_DATA:
47199 + res_add += GR_RLIM_DATA_BUMP;
47200 + break;
47201 + case RLIMIT_STACK:
47202 + res_add += GR_RLIM_STACK_BUMP;
47203 + break;
47204 + case RLIMIT_CORE:
47205 + res_add += GR_RLIM_CORE_BUMP;
47206 + break;
47207 + case RLIMIT_RSS:
47208 + res_add += GR_RLIM_RSS_BUMP;
47209 + break;
47210 + case RLIMIT_NPROC:
47211 + res_add += GR_RLIM_NPROC_BUMP;
47212 + break;
47213 + case RLIMIT_NOFILE:
47214 + res_add += GR_RLIM_NOFILE_BUMP;
47215 + break;
47216 + case RLIMIT_MEMLOCK:
47217 + res_add += GR_RLIM_MEMLOCK_BUMP;
47218 + break;
47219 + case RLIMIT_AS:
47220 + res_add += GR_RLIM_AS_BUMP;
47221 + break;
47222 + case RLIMIT_LOCKS:
47223 + res_add += GR_RLIM_LOCKS_BUMP;
47224 + break;
47225 + case RLIMIT_SIGPENDING:
47226 + res_add += GR_RLIM_SIGPENDING_BUMP;
47227 + break;
47228 + case RLIMIT_MSGQUEUE:
47229 + res_add += GR_RLIM_MSGQUEUE_BUMP;
47230 + break;
47231 + case RLIMIT_NICE:
47232 + res_add += GR_RLIM_NICE_BUMP;
47233 + break;
47234 + case RLIMIT_RTPRIO:
47235 + res_add += GR_RLIM_RTPRIO_BUMP;
47236 + break;
47237 + case RLIMIT_RTTIME:
47238 + res_add += GR_RLIM_RTTIME_BUMP;
47239 + break;
47240 + }
47241 +
47242 + acl->res[res].rlim_cur = res_add;
47243 +
47244 + if (wanted > acl->res[res].rlim_max)
47245 + acl->res[res].rlim_max = res_add;
47246 +
47247 + /* only log the subject filename, since resource logging is supported for
47248 + single-subject learning only */
47249 + rcu_read_lock();
47250 + cred = __task_cred(task);
47251 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47252 + task->role->roletype, cred->uid, cred->gid, acl->filename,
47253 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
47254 + "", (unsigned long) res, &task->signal->saved_ip);
47255 + rcu_read_unlock();
47256 + }
47257 +
47258 + return;
47259 +}
47260 +
47261 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
47262 +void
47263 +pax_set_initial_flags(struct linux_binprm *bprm)
47264 +{
47265 + struct task_struct *task = current;
47266 + struct acl_subject_label *proc;
47267 + unsigned long flags;
47268 +
47269 + if (unlikely(!(gr_status & GR_READY)))
47270 + return;
47271 +
47272 + flags = pax_get_flags(task);
47273 +
47274 + proc = task->acl;
47275 +
47276 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
47277 + flags &= ~MF_PAX_PAGEEXEC;
47278 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
47279 + flags &= ~MF_PAX_SEGMEXEC;
47280 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
47281 + flags &= ~MF_PAX_RANDMMAP;
47282 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
47283 + flags &= ~MF_PAX_EMUTRAMP;
47284 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
47285 + flags &= ~MF_PAX_MPROTECT;
47286 +
47287 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
47288 + flags |= MF_PAX_PAGEEXEC;
47289 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
47290 + flags |= MF_PAX_SEGMEXEC;
47291 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
47292 + flags |= MF_PAX_RANDMMAP;
47293 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
47294 + flags |= MF_PAX_EMUTRAMP;
47295 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
47296 + flags |= MF_PAX_MPROTECT;
47297 +
47298 + pax_set_flags(task, flags);
47299 +
47300 + return;
47301 +}
47302 +#endif
47303 +
47304 +#ifdef CONFIG_SYSCTL
47305 +/* Eric Biederman likes breaking userland ABI and every inode-based security
47306 + system to save 35kb of memory */
47307 +
47308 +/* we modify the passed in filename, but adjust it back before returning */
47309 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
47310 +{
47311 + struct name_entry *nmatch;
47312 + char *p, *lastp = NULL;
47313 + struct acl_object_label *obj = NULL, *tmp;
47314 + struct acl_subject_label *tmpsubj;
47315 + char c = '\0';
47316 +
47317 + read_lock(&gr_inode_lock);
47318 +
47319 + p = name + len - 1;
47320 + do {
47321 + nmatch = lookup_name_entry(name);
47322 + if (lastp != NULL)
47323 + *lastp = c;
47324 +
47325 + if (nmatch == NULL)
47326 + goto next_component;
47327 + tmpsubj = current->acl;
47328 + do {
47329 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
47330 + if (obj != NULL) {
47331 + tmp = obj->globbed;
47332 + while (tmp) {
47333 + if (!glob_match(tmp->filename, name)) {
47334 + obj = tmp;
47335 + goto found_obj;
47336 + }
47337 + tmp = tmp->next;
47338 + }
47339 + goto found_obj;
47340 + }
47341 + } while ((tmpsubj = tmpsubj->parent_subject));
47342 +next_component:
47343 + /* end case */
47344 + if (p == name)
47345 + break;
47346 +
47347 + while (*p != '/')
47348 + p--;
47349 + if (p == name)
47350 + lastp = p + 1;
47351 + else {
47352 + lastp = p;
47353 + p--;
47354 + }
47355 + c = *lastp;
47356 + *lastp = '\0';
47357 + } while (1);
47358 +found_obj:
47359 + read_unlock(&gr_inode_lock);
47360 + /* obj returned will always be non-null */
47361 + return obj;
47362 +}
47363 +
47364 +/* returns 0 when allowing, non-zero on error
47365 + op of 0 is used for readdir, so we don't log the names of hidden files
47366 +*/
47367 +__u32
47368 +gr_handle_sysctl(const struct ctl_table *table, const int op)
47369 +{
47370 + ctl_table *tmp;
47371 + const char *proc_sys = "/proc/sys";
47372 + char *path;
47373 + struct acl_object_label *obj;
47374 + unsigned short len = 0, pos = 0, depth = 0, i;
47375 + __u32 err = 0;
47376 + __u32 mode = 0;
47377 +
47378 + if (unlikely(!(gr_status & GR_READY)))
47379 + return 0;
47380 +
47381 + /* for now, ignore operations on non-sysctl entries if it's not a
47382 + readdir */
47383 + if (table->child != NULL && op != 0)
47384 + return 0;
47385 +
47386 + mode |= GR_FIND;
47387 + /* it's only a read if it's an entry, read on dirs is for readdir */
47388 + if (op & MAY_READ)
47389 + mode |= GR_READ;
47390 + if (op & MAY_WRITE)
47391 + mode |= GR_WRITE;
47392 +
47393 + preempt_disable();
47394 +
47395 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47396 +
47397 + /* it's only a read/write if it's an actual entry, not a dir
47398 + (which are opened for readdir)
47399 + */
47400 +
47401 + /* convert the requested sysctl entry into a pathname */
47402 +
47403 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47404 + len += strlen(tmp->procname);
47405 + len++;
47406 + depth++;
47407 + }
47408 +
47409 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
47410 + /* deny */
47411 + goto out;
47412 + }
47413 +
47414 + memset(path, 0, PAGE_SIZE);
47415 +
47416 + memcpy(path, proc_sys, strlen(proc_sys));
47417 +
47418 + pos += strlen(proc_sys);
47419 +
47420 + for (; depth > 0; depth--) {
47421 + path[pos] = '/';
47422 + pos++;
47423 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47424 + if (depth == i) {
47425 + memcpy(path + pos, tmp->procname,
47426 + strlen(tmp->procname));
47427 + pos += strlen(tmp->procname);
47428 + }
47429 + i++;
47430 + }
47431 + }
47432 +
47433 + obj = gr_lookup_by_name(path, pos);
47434 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
47435 +
47436 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
47437 + ((err & mode) != mode))) {
47438 + __u32 new_mode = mode;
47439 +
47440 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47441 +
47442 + err = 0;
47443 + gr_log_learn_sysctl(path, new_mode);
47444 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
47445 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
47446 + err = -ENOENT;
47447 + } else if (!(err & GR_FIND)) {
47448 + err = -ENOENT;
47449 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
47450 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
47451 + path, (mode & GR_READ) ? " reading" : "",
47452 + (mode & GR_WRITE) ? " writing" : "");
47453 + err = -EACCES;
47454 + } else if ((err & mode) != mode) {
47455 + err = -EACCES;
47456 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
47457 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
47458 + path, (mode & GR_READ) ? " reading" : "",
47459 + (mode & GR_WRITE) ? " writing" : "");
47460 + err = 0;
47461 + } else
47462 + err = 0;
47463 +
47464 + out:
47465 + preempt_enable();
47466 +
47467 + return err;
47468 +}
47469 +#endif
47470 +
47471 +int
47472 +gr_handle_proc_ptrace(struct task_struct *task)
47473 +{
47474 + struct file *filp;
47475 + struct task_struct *tmp = task;
47476 + struct task_struct *curtemp = current;
47477 + __u32 retmode;
47478 +
47479 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47480 + if (unlikely(!(gr_status & GR_READY)))
47481 + return 0;
47482 +#endif
47483 +
47484 + read_lock(&tasklist_lock);
47485 + read_lock(&grsec_exec_file_lock);
47486 + filp = task->exec_file;
47487 +
47488 + while (tmp->pid > 0) {
47489 + if (tmp == curtemp)
47490 + break;
47491 + tmp = tmp->real_parent;
47492 + }
47493 +
47494 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47495 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
47496 + read_unlock(&grsec_exec_file_lock);
47497 + read_unlock(&tasklist_lock);
47498 + return 1;
47499 + }
47500 +
47501 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47502 + if (!(gr_status & GR_READY)) {
47503 + read_unlock(&grsec_exec_file_lock);
47504 + read_unlock(&tasklist_lock);
47505 + return 0;
47506 + }
47507 +#endif
47508 +
47509 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
47510 + read_unlock(&grsec_exec_file_lock);
47511 + read_unlock(&tasklist_lock);
47512 +
47513 + if (retmode & GR_NOPTRACE)
47514 + return 1;
47515 +
47516 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
47517 + && (current->acl != task->acl || (current->acl != current->role->root_label
47518 + && current->pid != task->pid)))
47519 + return 1;
47520 +
47521 + return 0;
47522 +}
47523 +
47524 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
47525 +{
47526 + if (unlikely(!(gr_status & GR_READY)))
47527 + return;
47528 +
47529 + if (!(current->role->roletype & GR_ROLE_GOD))
47530 + return;
47531 +
47532 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
47533 + p->role->rolename, gr_task_roletype_to_char(p),
47534 + p->acl->filename);
47535 +}
47536 +
47537 +int
47538 +gr_handle_ptrace(struct task_struct *task, const long request)
47539 +{
47540 + struct task_struct *tmp = task;
47541 + struct task_struct *curtemp = current;
47542 + __u32 retmode;
47543 +
47544 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47545 + if (unlikely(!(gr_status & GR_READY)))
47546 + return 0;
47547 +#endif
47548 +
47549 + read_lock(&tasklist_lock);
47550 + while (tmp->pid > 0) {
47551 + if (tmp == curtemp)
47552 + break;
47553 + tmp = tmp->real_parent;
47554 + }
47555 +
47556 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47557 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
47558 + read_unlock(&tasklist_lock);
47559 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47560 + return 1;
47561 + }
47562 + read_unlock(&tasklist_lock);
47563 +
47564 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47565 + if (!(gr_status & GR_READY))
47566 + return 0;
47567 +#endif
47568 +
47569 + read_lock(&grsec_exec_file_lock);
47570 + if (unlikely(!task->exec_file)) {
47571 + read_unlock(&grsec_exec_file_lock);
47572 + return 0;
47573 + }
47574 +
47575 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
47576 + read_unlock(&grsec_exec_file_lock);
47577 +
47578 + if (retmode & GR_NOPTRACE) {
47579 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47580 + return 1;
47581 + }
47582 +
47583 + if (retmode & GR_PTRACERD) {
47584 + switch (request) {
47585 + case PTRACE_POKETEXT:
47586 + case PTRACE_POKEDATA:
47587 + case PTRACE_POKEUSR:
47588 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
47589 + case PTRACE_SETREGS:
47590 + case PTRACE_SETFPREGS:
47591 +#endif
47592 +#ifdef CONFIG_X86
47593 + case PTRACE_SETFPXREGS:
47594 +#endif
47595 +#ifdef CONFIG_ALTIVEC
47596 + case PTRACE_SETVRREGS:
47597 +#endif
47598 + return 1;
47599 + default:
47600 + return 0;
47601 + }
47602 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
47603 + !(current->role->roletype & GR_ROLE_GOD) &&
47604 + (current->acl != task->acl)) {
47605 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47606 + return 1;
47607 + }
47608 +
47609 + return 0;
47610 +}
47611 +
47612 +static int is_writable_mmap(const struct file *filp)
47613 +{
47614 + struct task_struct *task = current;
47615 + struct acl_object_label *obj, *obj2;
47616 +
47617 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
47618 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
47619 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47620 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
47621 + task->role->root_label);
47622 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
47623 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
47624 + return 1;
47625 + }
47626 + }
47627 + return 0;
47628 +}
47629 +
47630 +int
47631 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
47632 +{
47633 + __u32 mode;
47634 +
47635 + if (unlikely(!file || !(prot & PROT_EXEC)))
47636 + return 1;
47637 +
47638 + if (is_writable_mmap(file))
47639 + return 0;
47640 +
47641 + mode =
47642 + gr_search_file(file->f_path.dentry,
47643 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47644 + file->f_path.mnt);
47645 +
47646 + if (!gr_tpe_allow(file))
47647 + return 0;
47648 +
47649 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47650 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47651 + return 0;
47652 + } else if (unlikely(!(mode & GR_EXEC))) {
47653 + return 0;
47654 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47655 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47656 + return 1;
47657 + }
47658 +
47659 + return 1;
47660 +}
47661 +
47662 +int
47663 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47664 +{
47665 + __u32 mode;
47666 +
47667 + if (unlikely(!file || !(prot & PROT_EXEC)))
47668 + return 1;
47669 +
47670 + if (is_writable_mmap(file))
47671 + return 0;
47672 +
47673 + mode =
47674 + gr_search_file(file->f_path.dentry,
47675 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47676 + file->f_path.mnt);
47677 +
47678 + if (!gr_tpe_allow(file))
47679 + return 0;
47680 +
47681 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47682 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47683 + return 0;
47684 + } else if (unlikely(!(mode & GR_EXEC))) {
47685 + return 0;
47686 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47687 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47688 + return 1;
47689 + }
47690 +
47691 + return 1;
47692 +}
47693 +
47694 +void
47695 +gr_acl_handle_psacct(struct task_struct *task, const long code)
47696 +{
47697 + unsigned long runtime;
47698 + unsigned long cputime;
47699 + unsigned int wday, cday;
47700 + __u8 whr, chr;
47701 + __u8 wmin, cmin;
47702 + __u8 wsec, csec;
47703 + struct timespec timeval;
47704 +
47705 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
47706 + !(task->acl->mode & GR_PROCACCT)))
47707 + return;
47708 +
47709 + do_posix_clock_monotonic_gettime(&timeval);
47710 + runtime = timeval.tv_sec - task->start_time.tv_sec;
47711 + wday = runtime / (3600 * 24);
47712 + runtime -= wday * (3600 * 24);
47713 + whr = runtime / 3600;
47714 + runtime -= whr * 3600;
47715 + wmin = runtime / 60;
47716 + runtime -= wmin * 60;
47717 + wsec = runtime;
47718 +
47719 + cputime = (task->utime + task->stime) / HZ;
47720 + cday = cputime / (3600 * 24);
47721 + cputime -= cday * (3600 * 24);
47722 + chr = cputime / 3600;
47723 + cputime -= chr * 3600;
47724 + cmin = cputime / 60;
47725 + cputime -= cmin * 60;
47726 + csec = cputime;
47727 +
47728 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
47729 +
47730 + return;
47731 +}
47732 +
47733 +void gr_set_kernel_label(struct task_struct *task)
47734 +{
47735 + if (gr_status & GR_READY) {
47736 + task->role = kernel_role;
47737 + task->acl = kernel_role->root_label;
47738 + }
47739 + return;
47740 +}
47741 +
47742 +#ifdef CONFIG_TASKSTATS
47743 +int gr_is_taskstats_denied(int pid)
47744 +{
47745 + struct task_struct *task;
47746 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47747 + const struct cred *cred;
47748 +#endif
47749 + int ret = 0;
47750 +
47751 + /* restrict taskstats viewing to un-chrooted root users
47752 + who have the 'view' subject flag if the RBAC system is enabled
47753 + */
47754 +
47755 + rcu_read_lock();
47756 + read_lock(&tasklist_lock);
47757 + task = find_task_by_vpid(pid);
47758 + if (task) {
47759 +#ifdef CONFIG_GRKERNSEC_CHROOT
47760 + if (proc_is_chrooted(task))
47761 + ret = -EACCES;
47762 +#endif
47763 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47764 + cred = __task_cred(task);
47765 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47766 + if (cred->uid != 0)
47767 + ret = -EACCES;
47768 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47769 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
47770 + ret = -EACCES;
47771 +#endif
47772 +#endif
47773 + if (gr_status & GR_READY) {
47774 + if (!(task->acl->mode & GR_VIEW))
47775 + ret = -EACCES;
47776 + }
47777 + } else
47778 + ret = -ENOENT;
47779 +
47780 + read_unlock(&tasklist_lock);
47781 + rcu_read_unlock();
47782 +
47783 + return ret;
47784 +}
47785 +#endif
47786 +
47787 +/* AUXV entries are filled via a descendant of search_binary_handler
47788 + after we've already applied the subject for the target
47789 +*/
47790 +int gr_acl_enable_at_secure(void)
47791 +{
47792 + if (unlikely(!(gr_status & GR_READY)))
47793 + return 0;
47794 +
47795 + if (current->acl->mode & GR_ATSECURE)
47796 + return 1;
47797 +
47798 + return 0;
47799 +}
47800 +
47801 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
47802 +{
47803 + struct task_struct *task = current;
47804 + struct dentry *dentry = file->f_path.dentry;
47805 + struct vfsmount *mnt = file->f_path.mnt;
47806 + struct acl_object_label *obj, *tmp;
47807 + struct acl_subject_label *subj;
47808 + unsigned int bufsize;
47809 + int is_not_root;
47810 + char *path;
47811 + dev_t dev = __get_dev(dentry);
47812 +
47813 + if (unlikely(!(gr_status & GR_READY)))
47814 + return 1;
47815 +
47816 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
47817 + return 1;
47818 +
47819 + /* ignore Eric Biederman */
47820 + if (IS_PRIVATE(dentry->d_inode))
47821 + return 1;
47822 +
47823 + subj = task->acl;
47824 + do {
47825 + obj = lookup_acl_obj_label(ino, dev, subj);
47826 + if (obj != NULL)
47827 + return (obj->mode & GR_FIND) ? 1 : 0;
47828 + } while ((subj = subj->parent_subject));
47829 +
47830 + /* this is purely an optimization since we're looking for an object
47831 + for the directory we're doing a readdir on
47832 + if it's possible for any globbed object to match the entry we're
47833 + filling into the directory, then the object we find here will be
47834 + an anchor point with attached globbed objects
47835 + */
47836 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
47837 + if (obj->globbed == NULL)
47838 + return (obj->mode & GR_FIND) ? 1 : 0;
47839 +
47840 + is_not_root = ((obj->filename[0] == '/') &&
47841 + (obj->filename[1] == '\0')) ? 0 : 1;
47842 + bufsize = PAGE_SIZE - namelen - is_not_root;
47843 +
47844 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
47845 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
47846 + return 1;
47847 +
47848 + preempt_disable();
47849 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47850 + bufsize);
47851 +
47852 + bufsize = strlen(path);
47853 +
47854 + /* if base is "/", don't append an additional slash */
47855 + if (is_not_root)
47856 + *(path + bufsize) = '/';
47857 + memcpy(path + bufsize + is_not_root, name, namelen);
47858 + *(path + bufsize + namelen + is_not_root) = '\0';
47859 +
47860 + tmp = obj->globbed;
47861 + while (tmp) {
47862 + if (!glob_match(tmp->filename, path)) {
47863 + preempt_enable();
47864 + return (tmp->mode & GR_FIND) ? 1 : 0;
47865 + }
47866 + tmp = tmp->next;
47867 + }
47868 + preempt_enable();
47869 + return (obj->mode & GR_FIND) ? 1 : 0;
47870 +}
47871 +
47872 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
47873 +EXPORT_SYMBOL(gr_acl_is_enabled);
47874 +#endif
47875 +EXPORT_SYMBOL(gr_learn_resource);
47876 +EXPORT_SYMBOL(gr_set_kernel_label);
47877 +#ifdef CONFIG_SECURITY
47878 +EXPORT_SYMBOL(gr_check_user_change);
47879 +EXPORT_SYMBOL(gr_check_group_change);
47880 +#endif
47881 +
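The update_acl_obj_label(), update_acl_subj_label() and update_inodev_entry() helpers above all perform the same operation: find the GR_DELETED entry keyed by the old (inode, device) pair in a doubly-linked hash chain, unlink it, rekey it to the newly created file's identity, and reinsert it. A minimal standalone sketch of that pattern follows (illustrative names only, not code from this patch; it uses a tiny fixed-size table instead of the fhash()-sized tables above):

#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 16

struct entry {
	unsigned long inode;
	unsigned int device;
	int deleted;
	struct entry *prev, *next;
};

static struct entry *table[NBUCKETS];

static unsigned int bucket(unsigned long inode, unsigned int device)
{
	return (unsigned int)(inode ^ device) % NBUCKETS;
}

static void insert_entry(struct entry *e)
{
	unsigned int i = bucket(e->inode, e->device);

	/* push onto the head of the chain for the entry's current key */
	e->prev = NULL;
	e->next = table[i];
	if (table[i])
		table[i]->prev = e;
	table[i] = e;
}

static void relink_entry(unsigned long oldino, unsigned int olddev,
			 unsigned long newino, unsigned int newdev)
{
	unsigned int i = bucket(oldino, olddev);
	struct entry *m = table[i];

	/* only an entry marked deleted under the old key is eligible */
	while (m && (m->inode != oldino || m->device != olddev || !m->deleted))
		m = m->next;
	if (!m)
		return;

	/* unlink from the old chain */
	if (m->prev == NULL)
		table[i] = m->next;
	else
		m->prev->next = m->next;
	if (m->next)
		m->next->prev = m->prev;

	/* rekey, clear the deleted flag and reinsert under the new key */
	m->inode = newino;
	m->device = newdev;
	m->deleted = 0;
	insert_entry(m);
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	e->inode = 42;
	e->device = 8;
	e->deleted = 1;		/* policy still refers to an unlinked file */
	insert_entry(e);

	relink_entry(42, 8, 1337, 8);	/* the file was recreated */
	printf("inode=%lu deleted=%d\n", e->inode, e->deleted);
	free(e);
	return 0;
}

In the patch itself the same walk runs under write_lock(&gr_inode_lock), taken in gr_handle_create() and gr_handle_rename(), which is why the helpers can splice the chains in place.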
47882 diff -urNp linux-2.6.32.42/grsecurity/gracl_cap.c linux-2.6.32.42/grsecurity/gracl_cap.c
47883 --- linux-2.6.32.42/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
47884 +++ linux-2.6.32.42/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
47885 @@ -0,0 +1,138 @@
47886 +#include <linux/kernel.h>
47887 +#include <linux/module.h>
47888 +#include <linux/sched.h>
47889 +#include <linux/gracl.h>
47890 +#include <linux/grsecurity.h>
47891 +#include <linux/grinternal.h>
47892 +
47893 +static const char *captab_log[] = {
47894 + "CAP_CHOWN",
47895 + "CAP_DAC_OVERRIDE",
47896 + "CAP_DAC_READ_SEARCH",
47897 + "CAP_FOWNER",
47898 + "CAP_FSETID",
47899 + "CAP_KILL",
47900 + "CAP_SETGID",
47901 + "CAP_SETUID",
47902 + "CAP_SETPCAP",
47903 + "CAP_LINUX_IMMUTABLE",
47904 + "CAP_NET_BIND_SERVICE",
47905 + "CAP_NET_BROADCAST",
47906 + "CAP_NET_ADMIN",
47907 + "CAP_NET_RAW",
47908 + "CAP_IPC_LOCK",
47909 + "CAP_IPC_OWNER",
47910 + "CAP_SYS_MODULE",
47911 + "CAP_SYS_RAWIO",
47912 + "CAP_SYS_CHROOT",
47913 + "CAP_SYS_PTRACE",
47914 + "CAP_SYS_PACCT",
47915 + "CAP_SYS_ADMIN",
47916 + "CAP_SYS_BOOT",
47917 + "CAP_SYS_NICE",
47918 + "CAP_SYS_RESOURCE",
47919 + "CAP_SYS_TIME",
47920 + "CAP_SYS_TTY_CONFIG",
47921 + "CAP_MKNOD",
47922 + "CAP_LEASE",
47923 + "CAP_AUDIT_WRITE",
47924 + "CAP_AUDIT_CONTROL",
47925 + "CAP_SETFCAP",
47926 + "CAP_MAC_OVERRIDE",
47927 + "CAP_MAC_ADMIN"
47928 +};
47929 +
47930 +EXPORT_SYMBOL(gr_is_capable);
47931 +EXPORT_SYMBOL(gr_is_capable_nolog);
47932 +
47933 +int
47934 +gr_is_capable(const int cap)
47935 +{
47936 + struct task_struct *task = current;
47937 + const struct cred *cred = current_cred();
47938 + struct acl_subject_label *curracl;
47939 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
47940 + kernel_cap_t cap_audit = __cap_empty_set;
47941 +
47942 + if (!gr_acl_is_enabled())
47943 + return 1;
47944 +
47945 + curracl = task->acl;
47946 +
47947 + cap_drop = curracl->cap_lower;
47948 + cap_mask = curracl->cap_mask;
47949 + cap_audit = curracl->cap_invert_audit;
47950 +
47951 + while ((curracl = curracl->parent_subject)) {
47952 + /* if the cap isn't covered by the computed mask but is specified in the
47953 + current level subject, add it to the computed mask; if the current level
47954 + subject also lowers the cap, add it to the set of dropped capabilities
47955 + (and likewise note it in the inverted-audit set when requested)
47956 + */
47957 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
47958 + cap_raise(cap_mask, cap);
47959 + if (cap_raised(curracl->cap_lower, cap))
47960 + cap_raise(cap_drop, cap);
47961 + if (cap_raised(curracl->cap_invert_audit, cap))
47962 + cap_raise(cap_audit, cap);
47963 + }
47964 + }
47965 +
47966 + if (!cap_raised(cap_drop, cap)) {
47967 + if (cap_raised(cap_audit, cap))
47968 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
47969 + return 1;
47970 + }
47971 +
47972 + curracl = task->acl;
47973 +
47974 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
47975 + && cap_raised(cred->cap_effective, cap)) {
47976 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47977 + task->role->roletype, cred->uid,
47978 + cred->gid, task->exec_file ?
47979 + gr_to_filename(task->exec_file->f_path.dentry,
47980 + task->exec_file->f_path.mnt) : curracl->filename,
47981 + curracl->filename, 0UL,
47982 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
47983 + return 1;
47984 + }
47985 +
47986 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
47987 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
47988 + return 0;
47989 +}
47990 +
47991 +int
47992 +gr_is_capable_nolog(const int cap)
47993 +{
47994 + struct acl_subject_label *curracl;
47995 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
47996 +
47997 + if (!gr_acl_is_enabled())
47998 + return 1;
47999 +
48000 + curracl = current->acl;
48001 +
48002 + cap_drop = curracl->cap_lower;
48003 + cap_mask = curracl->cap_mask;
48004 +
48005 + while ((curracl = curracl->parent_subject)) {
48006 + /* if the cap isn't covered by the computed mask but is specified in the
48007 + current level subject, add it to the computed mask; if the current
48008 + level subject also lowers the cap, add it to the set of dropped
48009 + capabilities
48010 + */
48011 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48012 + cap_raise(cap_mask, cap);
48013 + if (cap_raised(curracl->cap_lower, cap))
48014 + cap_raise(cap_drop, cap);
48015 + }
48016 + }
48017 +
48018 + if (!cap_raised(cap_drop, cap))
48019 + return 1;
48020 +
48021 + return 0;
48022 +}
48023 +
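The parent-subject walk in gr_is_capable()/gr_is_capable_nolog() above composes an effective capability policy from nested subjects: the innermost subject that names a capability in its cap_mask decides whether it is lowered, and ancestors are only consulted for capabilities the computed mask does not yet cover. A small userspace sketch of that composition (hypothetical names, plain bitmasks instead of kernel_cap_t):

#include <stdio.h>
#include <stdbool.h>

struct subject {
	unsigned long cap_mask;		/* capabilities this subject specifies */
	unsigned long cap_lower;	/* capabilities this subject drops */
	struct subject *parent;
};

static bool cap_allowed(const struct subject *subj, int cap)
{
	unsigned long bit = 1UL << cap;
	unsigned long mask = subj->cap_mask;	/* caps already decided */
	unsigned long drop = subj->cap_lower;	/* caps decided as dropped */

	for (subj = subj->parent; subj; subj = subj->parent) {
		/* an ancestor only decides caps the computed mask doesn't cover */
		if (!(mask & bit) && (subj->cap_mask & bit)) {
			mask |= bit;
			if (subj->cap_lower & bit)
				drop |= bit;
		}
	}
	return !(drop & bit);
}

int main(void)
{
	/* the parent specifies and lowers capability 2; the leaf is silent */
	struct subject root = { .cap_mask = 1UL << 2, .cap_lower = 1UL << 2, .parent = NULL };
	struct subject leaf = { .cap_mask = 0, .cap_lower = 0, .parent = &root };

	printf("cap 2 allowed: %d\n", cap_allowed(&leaf, 2));	/* 0: dropped by the parent */
	printf("cap 3 allowed: %d\n", cap_allowed(&leaf, 3));	/* 1: nothing drops it */
	return 0;
}

gr_is_capable() additionally carries a cap_invert_audit set through the same walk so that permitted-but-audited capability uses are logged via gr_log_cap().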
48024 diff -urNp linux-2.6.32.42/grsecurity/gracl_fs.c linux-2.6.32.42/grsecurity/gracl_fs.c
48025 --- linux-2.6.32.42/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
48026 +++ linux-2.6.32.42/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
48027 @@ -0,0 +1,431 @@
48028 +#include <linux/kernel.h>
48029 +#include <linux/sched.h>
48030 +#include <linux/types.h>
48031 +#include <linux/fs.h>
48032 +#include <linux/file.h>
48033 +#include <linux/stat.h>
48034 +#include <linux/grsecurity.h>
48035 +#include <linux/grinternal.h>
48036 +#include <linux/gracl.h>
48037 +
48038 +__u32
48039 +gr_acl_handle_hidden_file(const struct dentry * dentry,
48040 + const struct vfsmount * mnt)
48041 +{
48042 + __u32 mode;
48043 +
48044 + if (unlikely(!dentry->d_inode))
48045 + return GR_FIND;
48046 +
48047 + mode =
48048 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
48049 +
48050 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
48051 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48052 + return mode;
48053 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
48054 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48055 + return 0;
48056 + } else if (unlikely(!(mode & GR_FIND)))
48057 + return 0;
48058 +
48059 + return GR_FIND;
48060 +}
48061 +
48062 +__u32
48063 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
48064 + const int fmode)
48065 +{
48066 + __u32 reqmode = GR_FIND;
48067 + __u32 mode;
48068 +
48069 + if (unlikely(!dentry->d_inode))
48070 + return reqmode;
48071 +
48072 + if (unlikely(fmode & O_APPEND))
48073 + reqmode |= GR_APPEND;
48074 + else if (unlikely(fmode & FMODE_WRITE))
48075 + reqmode |= GR_WRITE;
48076 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48077 + reqmode |= GR_READ;
48078 + if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
48079 + reqmode &= ~GR_READ;
48080 + mode =
48081 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48082 + mnt);
48083 +
48084 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48085 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48086 + reqmode & GR_READ ? " reading" : "",
48087 + reqmode & GR_WRITE ? " writing" : reqmode &
48088 + GR_APPEND ? " appending" : "");
48089 + return reqmode;
48090 + } else
48091 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48092 + {
48093 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48094 + reqmode & GR_READ ? " reading" : "",
48095 + reqmode & GR_WRITE ? " writing" : reqmode &
48096 + GR_APPEND ? " appending" : "");
48097 + return 0;
48098 + } else if (unlikely((mode & reqmode) != reqmode))
48099 + return 0;
48100 +
48101 + return reqmode;
48102 +}
48103 +
48104 +__u32
48105 +gr_acl_handle_creat(const struct dentry * dentry,
48106 + const struct dentry * p_dentry,
48107 + const struct vfsmount * p_mnt, const int fmode,
48108 + const int imode)
48109 +{
48110 + __u32 reqmode = GR_WRITE | GR_CREATE;
48111 + __u32 mode;
48112 +
48113 + if (unlikely(fmode & O_APPEND))
48114 + reqmode |= GR_APPEND;
48115 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48116 + reqmode |= GR_READ;
48117 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
48118 + reqmode |= GR_SETID;
48119 +
48120 + mode =
48121 + gr_check_create(dentry, p_dentry, p_mnt,
48122 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48123 +
48124 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48125 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48126 + reqmode & GR_READ ? " reading" : "",
48127 + reqmode & GR_WRITE ? " writing" : reqmode &
48128 + GR_APPEND ? " appending" : "");
48129 + return reqmode;
48130 + } else
48131 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48132 + {
48133 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48134 + reqmode & GR_READ ? " reading" : "",
48135 + reqmode & GR_WRITE ? " writing" : reqmode &
48136 + GR_APPEND ? " appending" : "");
48137 + return 0;
48138 + } else if (unlikely((mode & reqmode) != reqmode))
48139 + return 0;
48140 +
48141 + return reqmode;
48142 +}
48143 +
48144 +__u32
48145 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
48146 + const int fmode)
48147 +{
48148 + __u32 mode, reqmode = GR_FIND;
48149 +
48150 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
48151 + reqmode |= GR_EXEC;
48152 + if (fmode & S_IWOTH)
48153 + reqmode |= GR_WRITE;
48154 + if (fmode & S_IROTH)
48155 + reqmode |= GR_READ;
48156 +
48157 + mode =
48158 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48159 + mnt);
48160 +
48161 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48162 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48163 + reqmode & GR_READ ? " reading" : "",
48164 + reqmode & GR_WRITE ? " writing" : "",
48165 + reqmode & GR_EXEC ? " executing" : "");
48166 + return reqmode;
48167 + } else
48168 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48169 + {
48170 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48171 + reqmode & GR_READ ? " reading" : "",
48172 + reqmode & GR_WRITE ? " writing" : "",
48173 + reqmode & GR_EXEC ? " executing" : "");
48174 + return 0;
48175 + } else if (unlikely((mode & reqmode) != reqmode))
48176 + return 0;
48177 +
48178 + return reqmode;
48179 +}
48180 +
48181 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
48182 +{
48183 + __u32 mode;
48184 +
48185 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
48186 +
48187 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48188 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
48189 + return mode;
48190 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48191 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
48192 + return 0;
48193 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
48194 + return 0;
48195 +
48196 + return (reqmode);
48197 +}
48198 +
48199 +__u32
48200 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
48201 +{
48202 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
48203 +}
48204 +
48205 +__u32
48206 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
48207 +{
48208 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
48209 +}
48210 +
48211 +__u32
48212 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
48213 +{
48214 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
48215 +}
48216 +
48217 +__u32
48218 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
48219 +{
48220 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
48221 +}
48222 +
48223 +__u32
48224 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
48225 + mode_t mode)
48226 +{
48227 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
48228 + return 1;
48229 +
48230 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48231 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48232 + GR_FCHMOD_ACL_MSG);
48233 + } else {
48234 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
48235 + }
48236 +}
48237 +
48238 +__u32
48239 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
48240 + mode_t mode)
48241 +{
48242 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48243 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48244 + GR_CHMOD_ACL_MSG);
48245 + } else {
48246 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
48247 + }
48248 +}
48249 +
48250 +__u32
48251 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
48252 +{
48253 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
48254 +}
48255 +
48256 +__u32
48257 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
48258 +{
48259 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
48260 +}
48261 +
48262 +__u32
48263 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
48264 +{
48265 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
48266 +}
48267 +
48268 +__u32
48269 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
48270 +{
48271 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
48272 + GR_UNIXCONNECT_ACL_MSG);
48273 +}
48274 +
48275 +/* hardlinks require at minimum create permission,
48276 + any additional privilege required is based on the
48277 + privilege of the file being linked to
48278 +*/
48279 +__u32
48280 +gr_acl_handle_link(const struct dentry * new_dentry,
48281 + const struct dentry * parent_dentry,
48282 + const struct vfsmount * parent_mnt,
48283 + const struct dentry * old_dentry,
48284 + const struct vfsmount * old_mnt, const char *to)
48285 +{
48286 + __u32 mode;
48287 + __u32 needmode = GR_CREATE | GR_LINK;
48288 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
48289 +
48290 + mode =
48291 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
48292 + old_mnt);
48293 +
48294 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
48295 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48296 + return mode;
48297 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48298 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48299 + return 0;
48300 + } else if (unlikely((mode & needmode) != needmode))
48301 + return 0;
48302 +
48303 + return 1;
48304 +}
48305 +
48306 +__u32
48307 +gr_acl_handle_symlink(const struct dentry * new_dentry,
48308 + const struct dentry * parent_dentry,
48309 + const struct vfsmount * parent_mnt, const char *from)
48310 +{
48311 + __u32 needmode = GR_WRITE | GR_CREATE;
48312 + __u32 mode;
48313 +
48314 + mode =
48315 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
48316 + GR_CREATE | GR_AUDIT_CREATE |
48317 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
48318 +
48319 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
48320 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48321 + return mode;
48322 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48323 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48324 + return 0;
48325 + } else if (unlikely((mode & needmode) != needmode))
48326 + return 0;
48327 +
48328 + return (GR_WRITE | GR_CREATE);
48329 +}
48330 +
48331 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
48332 +{
48333 + __u32 mode;
48334 +
48335 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48336 +
48337 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48338 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
48339 + return mode;
48340 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48341 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
48342 + return 0;
48343 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
48344 + return 0;
48345 +
48346 + return (reqmode);
48347 +}
48348 +
48349 +__u32
48350 +gr_acl_handle_mknod(const struct dentry * new_dentry,
48351 + const struct dentry * parent_dentry,
48352 + const struct vfsmount * parent_mnt,
48353 + const int mode)
48354 +{
48355 + __u32 reqmode = GR_WRITE | GR_CREATE;
48356 + if (unlikely(mode & (S_ISUID | S_ISGID)))
48357 + reqmode |= GR_SETID;
48358 +
48359 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48360 + reqmode, GR_MKNOD_ACL_MSG);
48361 +}
48362 +
48363 +__u32
48364 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
48365 + const struct dentry *parent_dentry,
48366 + const struct vfsmount *parent_mnt)
48367 +{
48368 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48369 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
48370 +}
48371 +
48372 +#define RENAME_CHECK_SUCCESS(old, new) \
48373 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
48374 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
48375 +
48376 +int
48377 +gr_acl_handle_rename(struct dentry *new_dentry,
48378 + struct dentry *parent_dentry,
48379 + const struct vfsmount *parent_mnt,
48380 + struct dentry *old_dentry,
48381 + struct inode *old_parent_inode,
48382 + struct vfsmount *old_mnt, const char *newname)
48383 +{
48384 + __u32 comp1, comp2;
48385 + int error = 0;
48386 +
48387 + if (unlikely(!gr_acl_is_enabled()))
48388 + return 0;
48389 +
48390 + if (!new_dentry->d_inode) {
48391 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
48392 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
48393 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
48394 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
48395 + GR_DELETE | GR_AUDIT_DELETE |
48396 + GR_AUDIT_READ | GR_AUDIT_WRITE |
48397 + GR_SUPPRESS, old_mnt);
48398 + } else {
48399 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
48400 + GR_CREATE | GR_DELETE |
48401 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
48402 + GR_AUDIT_READ | GR_AUDIT_WRITE |
48403 + GR_SUPPRESS, parent_mnt);
48404 + comp2 =
48405 + gr_search_file(old_dentry,
48406 + GR_READ | GR_WRITE | GR_AUDIT_READ |
48407 + GR_DELETE | GR_AUDIT_DELETE |
48408 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
48409 + }
48410 +
48411 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
48412 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
48413 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48414 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
48415 + && !(comp2 & GR_SUPPRESS)) {
48416 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48417 + error = -EACCES;
48418 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
48419 + error = -EACCES;
48420 +
48421 + return error;
48422 +}
48423 +
48424 +void
48425 +gr_acl_handle_exit(void)
48426 +{
48427 + u16 id;
48428 + char *rolename;
48429 + struct file *exec_file;
48430 +
48431 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
48432 + !(current->role->roletype & GR_ROLE_PERSIST))) {
48433 + id = current->acl_role_id;
48434 + rolename = current->role->rolename;
48435 + gr_set_acls(1);
48436 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
48437 + }
48438 +
48439 + write_lock(&grsec_exec_file_lock);
48440 + exec_file = current->exec_file;
48441 + current->exec_file = NULL;
48442 + write_unlock(&grsec_exec_file_lock);
48443 +
48444 + if (exec_file)
48445 + fput(exec_file);
48446 +}
48447 +
48448 +int
48449 +gr_acl_handle_procpidmem(const struct task_struct *task)
48450 +{
48451 + if (unlikely(!gr_acl_is_enabled()))
48452 + return 0;
48453 +
48454 + if (task != current && task->acl->mode & GR_PROTPROCFD)
48455 + return -EACCES;
48456 +
48457 + return 0;
48458 +}
48459 diff -urNp linux-2.6.32.42/grsecurity/gracl_ip.c linux-2.6.32.42/grsecurity/gracl_ip.c
48460 --- linux-2.6.32.42/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
48461 +++ linux-2.6.32.42/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
48462 @@ -0,0 +1,382 @@
48463 +#include <linux/kernel.h>
48464 +#include <asm/uaccess.h>
48465 +#include <asm/errno.h>
48466 +#include <net/sock.h>
48467 +#include <linux/file.h>
48468 +#include <linux/fs.h>
48469 +#include <linux/net.h>
48470 +#include <linux/in.h>
48471 +#include <linux/skbuff.h>
48472 +#include <linux/ip.h>
48473 +#include <linux/udp.h>
48474 +#include <linux/smp_lock.h>
48475 +#include <linux/types.h>
48476 +#include <linux/sched.h>
48477 +#include <linux/netdevice.h>
48478 +#include <linux/inetdevice.h>
48479 +#include <linux/gracl.h>
48480 +#include <linux/grsecurity.h>
48481 +#include <linux/grinternal.h>
48482 +
48483 +#define GR_BIND 0x01
48484 +#define GR_CONNECT 0x02
48485 +#define GR_INVERT 0x04
48486 +#define GR_BINDOVERRIDE 0x08
48487 +#define GR_CONNECTOVERRIDE 0x10
48488 +#define GR_SOCK_FAMILY 0x20
48489 +
48490 +static const char * gr_protocols[IPPROTO_MAX] = {
48491 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
48492 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
48493 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
48494 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
48495 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
48496 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
48497 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
48498 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
48499 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
48500 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
48501 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
48502 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
48503 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
48504 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
48505 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
48506 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
48507 +	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
48508 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
48509 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
48510 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
48511 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
48512 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
48513 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
48514 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
48515 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
48516 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
48517 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
48518 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
48519 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
48520 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
48521 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
48522 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
48523 + };
48524 +
48525 +static const char * gr_socktypes[SOCK_MAX] = {
48526 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
48527 + "unknown:7", "unknown:8", "unknown:9", "packet"
48528 + };
48529 +
48530 +static const char * gr_sockfamilies[AF_MAX+1] = {
48531 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
48532 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
48533 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
48534 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
48535 + };
48536 +
48537 +const char *
48538 +gr_proto_to_name(unsigned char proto)
48539 +{
48540 + return gr_protocols[proto];
48541 +}
48542 +
48543 +const char *
48544 +gr_socktype_to_name(unsigned char type)
48545 +{
48546 + return gr_socktypes[type];
48547 +}
48548 +
48549 +const char *
48550 +gr_sockfamily_to_name(unsigned char family)
48551 +{
48552 + return gr_sockfamilies[family];
48553 +}
48554 +
48555 +int
48556 +gr_search_socket(const int domain, const int type, const int protocol)
48557 +{
48558 + struct acl_subject_label *curr;
48559 + const struct cred *cred = current_cred();
48560 +
48561 + if (unlikely(!gr_acl_is_enabled()))
48562 + goto exit;
48563 +
48564 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
48565 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
48566 + goto exit; // let the kernel handle it
48567 +
48568 + curr = current->acl;
48569 +
48570 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
48571 + /* the family is allowed, if this is PF_INET allow it only if
48572 + the extra sock type/protocol checks pass */
48573 + if (domain == PF_INET)
48574 + goto inet_check;
48575 + goto exit;
48576 + } else {
48577 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48578 + __u32 fakeip = 0;
48579 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48580 + current->role->roletype, cred->uid,
48581 + cred->gid, current->exec_file ?
48582 + gr_to_filename(current->exec_file->f_path.dentry,
48583 + current->exec_file->f_path.mnt) :
48584 + curr->filename, curr->filename,
48585 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
48586 + &current->signal->saved_ip);
48587 + goto exit;
48588 + }
48589 + goto exit_fail;
48590 + }
48591 +
48592 +inet_check:
48593 + /* the rest of this checking is for IPv4 only */
48594 + if (!curr->ips)
48595 + goto exit;
48596 +
48597 + if ((curr->ip_type & (1 << type)) &&
48598 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
48599 + goto exit;
48600 +
48601 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48602 + /* we don't place acls on raw sockets , and sometimes
48603 + dgram/ip sockets are opened for ioctl and not
48604 + bind/connect, so we'll fake a bind learn log */
48605 + if (type == SOCK_RAW || type == SOCK_PACKET) {
48606 + __u32 fakeip = 0;
48607 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48608 + current->role->roletype, cred->uid,
48609 + cred->gid, current->exec_file ?
48610 + gr_to_filename(current->exec_file->f_path.dentry,
48611 + current->exec_file->f_path.mnt) :
48612 + curr->filename, curr->filename,
48613 + &fakeip, 0, type,
48614 + protocol, GR_CONNECT, &current->signal->saved_ip);
48615 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
48616 + __u32 fakeip = 0;
48617 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48618 + current->role->roletype, cred->uid,
48619 + cred->gid, current->exec_file ?
48620 + gr_to_filename(current->exec_file->f_path.dentry,
48621 + current->exec_file->f_path.mnt) :
48622 + curr->filename, curr->filename,
48623 + &fakeip, 0, type,
48624 + protocol, GR_BIND, &current->signal->saved_ip);
48625 + }
48626 + /* we'll log when they use connect or bind */
48627 + goto exit;
48628 + }
48629 +
48630 +exit_fail:
48631 + if (domain == PF_INET)
48632 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
48633 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
48634 + else
48635 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
48636 + gr_socktype_to_name(type), protocol);
48637 +
48638 + return 0;
48639 +exit:
48640 + return 1;
48641 +}
48642 +
48643 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
48644 +{
48645 + if ((ip->mode & mode) &&
48646 + (ip_port >= ip->low) &&
48647 + (ip_port <= ip->high) &&
48648 + ((ntohl(ip_addr) & our_netmask) ==
48649 + (ntohl(our_addr) & our_netmask))
48650 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
48651 + && (ip->type & (1 << type))) {
48652 + if (ip->mode & GR_INVERT)
48653 + return 2; // specifically denied
48654 + else
48655 + return 1; // allowed
48656 + }
48657 +
48658 + return 0; // not specifically allowed, may continue parsing
48659 +}
48660 +
48661 +static int
48662 +gr_search_connectbind(const int full_mode, struct sock *sk,
48663 + struct sockaddr_in *addr, const int type)
48664 +{
48665 + char iface[IFNAMSIZ] = {0};
48666 + struct acl_subject_label *curr;
48667 + struct acl_ip_label *ip;
48668 + struct inet_sock *isk;
48669 + struct net_device *dev;
48670 + struct in_device *idev;
48671 + unsigned long i;
48672 + int ret;
48673 + int mode = full_mode & (GR_BIND | GR_CONNECT);
48674 + __u32 ip_addr = 0;
48675 + __u32 our_addr;
48676 + __u32 our_netmask;
48677 + char *p;
48678 + __u16 ip_port = 0;
48679 + const struct cred *cred = current_cred();
48680 +
48681 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
48682 + return 0;
48683 +
48684 + curr = current->acl;
48685 + isk = inet_sk(sk);
48686 +
48687 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
48688 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
48689 + addr->sin_addr.s_addr = curr->inaddr_any_override;
48690 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
48691 + struct sockaddr_in saddr;
48692 + int err;
48693 +
48694 + saddr.sin_family = AF_INET;
48695 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
48696 + saddr.sin_port = isk->sport;
48697 +
48698 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
48699 + if (err)
48700 + return err;
48701 +
48702 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
48703 + if (err)
48704 + return err;
48705 + }
48706 +
48707 + if (!curr->ips)
48708 + return 0;
48709 +
48710 + ip_addr = addr->sin_addr.s_addr;
48711 + ip_port = ntohs(addr->sin_port);
48712 +
48713 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48714 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48715 + current->role->roletype, cred->uid,
48716 + cred->gid, current->exec_file ?
48717 + gr_to_filename(current->exec_file->f_path.dentry,
48718 + current->exec_file->f_path.mnt) :
48719 + curr->filename, curr->filename,
48720 + &ip_addr, ip_port, type,
48721 + sk->sk_protocol, mode, &current->signal->saved_ip);
48722 + return 0;
48723 + }
48724 +
48725 + for (i = 0; i < curr->ip_num; i++) {
48726 + ip = *(curr->ips + i);
48727 + if (ip->iface != NULL) {
48728 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
48729 + p = strchr(iface, ':');
48730 + if (p != NULL)
48731 + *p = '\0';
48732 + dev = dev_get_by_name(sock_net(sk), iface);
48733 + if (dev == NULL)
48734 + continue;
48735 + idev = in_dev_get(dev);
48736 + if (idev == NULL) {
48737 + dev_put(dev);
48738 + continue;
48739 + }
48740 + rcu_read_lock();
48741 + for_ifa(idev) {
48742 + if (!strcmp(ip->iface, ifa->ifa_label)) {
48743 + our_addr = ifa->ifa_address;
48744 + our_netmask = 0xffffffff;
48745 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
48746 + if (ret == 1) {
48747 + rcu_read_unlock();
48748 + in_dev_put(idev);
48749 + dev_put(dev);
48750 + return 0;
48751 + } else if (ret == 2) {
48752 + rcu_read_unlock();
48753 + in_dev_put(idev);
48754 + dev_put(dev);
48755 + goto denied;
48756 + }
48757 + }
48758 + } endfor_ifa(idev);
48759 + rcu_read_unlock();
48760 + in_dev_put(idev);
48761 + dev_put(dev);
48762 + } else {
48763 + our_addr = ip->addr;
48764 + our_netmask = ip->netmask;
48765 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
48766 + if (ret == 1)
48767 + return 0;
48768 + else if (ret == 2)
48769 + goto denied;
48770 + }
48771 + }
48772 +
48773 +denied:
48774 + if (mode == GR_BIND)
48775 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
48776 + else if (mode == GR_CONNECT)
48777 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
48778 +
48779 + return -EACCES;
48780 +}
48781 +
48782 +int
48783 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
48784 +{
48785 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
48786 +}
48787 +
48788 +int
48789 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
48790 +{
48791 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
48792 +}
48793 +
48794 +int gr_search_listen(struct socket *sock)
48795 +{
48796 + struct sock *sk = sock->sk;
48797 + struct sockaddr_in addr;
48798 +
48799 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
48800 + addr.sin_port = inet_sk(sk)->sport;
48801 +
48802 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
48803 +}
48804 +
48805 +int gr_search_accept(struct socket *sock)
48806 +{
48807 + struct sock *sk = sock->sk;
48808 + struct sockaddr_in addr;
48809 +
48810 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
48811 + addr.sin_port = inet_sk(sk)->sport;
48812 +
48813 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
48814 +}
48815 +
48816 +int
48817 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
48818 +{
48819 + if (addr)
48820 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
48821 + else {
48822 + struct sockaddr_in sin;
48823 + const struct inet_sock *inet = inet_sk(sk);
48824 +
48825 + sin.sin_addr.s_addr = inet->daddr;
48826 + sin.sin_port = inet->dport;
48827 +
48828 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
48829 + }
48830 +}
48831 +
48832 +int
48833 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
48834 +{
48835 + struct sockaddr_in sin;
48836 +
48837 + if (unlikely(skb->len < sizeof (struct udphdr)))
48838 + return 0; // skip this packet
48839 +
48840 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
48841 + sin.sin_port = udp_hdr(skb)->source;
48842 +
48843 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
48844 +}
48845 diff -urNp linux-2.6.32.42/grsecurity/gracl_learn.c linux-2.6.32.42/grsecurity/gracl_learn.c
48846 --- linux-2.6.32.42/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
48847 +++ linux-2.6.32.42/grsecurity/gracl_learn.c 2011-04-17 15:56:46.000000000 -0400
48848 @@ -0,0 +1,211 @@
48849 +#include <linux/kernel.h>
48850 +#include <linux/mm.h>
48851 +#include <linux/sched.h>
48852 +#include <linux/poll.h>
48853 +#include <linux/smp_lock.h>
48854 +#include <linux/string.h>
48855 +#include <linux/file.h>
48856 +#include <linux/types.h>
48857 +#include <linux/vmalloc.h>
48858 +#include <linux/grinternal.h>
48859 +
48860 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
48861 + size_t count, loff_t *ppos);
48862 +extern int gr_acl_is_enabled(void);
48863 +
48864 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
48865 +static int gr_learn_attached;
48866 +
48867 +/* use a 512k buffer */
48868 +#define LEARN_BUFFER_SIZE (512 * 1024)
48869 +
48870 +static DEFINE_SPINLOCK(gr_learn_lock);
48871 +static DEFINE_MUTEX(gr_learn_user_mutex);
48872 +
48873 +/* we need to maintain two buffers, so that the kernel context of grlearn
48874 + uses a semaphore around the userspace copying, and the other kernel contexts
48875 + use a spinlock when copying into the buffer, since they cannot sleep
48876 +*/
48877 +static char *learn_buffer;
48878 +static char *learn_buffer_user;
48879 +static int learn_buffer_len;
48880 +static int learn_buffer_user_len;
48881 +
48882 +static ssize_t
48883 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
48884 +{
48885 + DECLARE_WAITQUEUE(wait, current);
48886 + ssize_t retval = 0;
48887 +
48888 + add_wait_queue(&learn_wait, &wait);
48889 + set_current_state(TASK_INTERRUPTIBLE);
48890 + do {
48891 + mutex_lock(&gr_learn_user_mutex);
48892 + spin_lock(&gr_learn_lock);
48893 + if (learn_buffer_len)
48894 + break;
48895 + spin_unlock(&gr_learn_lock);
48896 + mutex_unlock(&gr_learn_user_mutex);
48897 + if (file->f_flags & O_NONBLOCK) {
48898 + retval = -EAGAIN;
48899 + goto out;
48900 + }
48901 + if (signal_pending(current)) {
48902 + retval = -ERESTARTSYS;
48903 + goto out;
48904 + }
48905 +
48906 + schedule();
48907 + } while (1);
48908 +
48909 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
48910 + learn_buffer_user_len = learn_buffer_len;
48911 + retval = learn_buffer_len;
48912 + learn_buffer_len = 0;
48913 +
48914 + spin_unlock(&gr_learn_lock);
48915 +
48916 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
48917 + retval = -EFAULT;
48918 +
48919 + mutex_unlock(&gr_learn_user_mutex);
48920 +out:
48921 + set_current_state(TASK_RUNNING);
48922 + remove_wait_queue(&learn_wait, &wait);
48923 + return retval;
48924 +}
48925 +
48926 +static unsigned int
48927 +poll_learn(struct file * file, poll_table * wait)
48928 +{
48929 + poll_wait(file, &learn_wait, wait);
48930 +
48931 + if (learn_buffer_len)
48932 + return (POLLIN | POLLRDNORM);
48933 +
48934 + return 0;
48935 +}
48936 +
48937 +void
48938 +gr_clear_learn_entries(void)
48939 +{
48940 + char *tmp;
48941 +
48942 + mutex_lock(&gr_learn_user_mutex);
48943 + if (learn_buffer != NULL) {
48944 + spin_lock(&gr_learn_lock);
48945 + tmp = learn_buffer;
48946 + learn_buffer = NULL;
48947 + spin_unlock(&gr_learn_lock);
48948 +		vfree(tmp);
48949 + }
48950 + if (learn_buffer_user != NULL) {
48951 + vfree(learn_buffer_user);
48952 + learn_buffer_user = NULL;
48953 + }
48954 + learn_buffer_len = 0;
48955 + mutex_unlock(&gr_learn_user_mutex);
48956 +
48957 + return;
48958 +}
48959 +
48960 +void
48961 +gr_add_learn_entry(const char *fmt, ...)
48962 +{
48963 + va_list args;
48964 + unsigned int len;
48965 +
48966 + if (!gr_learn_attached)
48967 + return;
48968 +
48969 + spin_lock(&gr_learn_lock);
48970 +
48971 + /* leave a gap at the end so we know when it's "full" but don't have to
48972 + compute the exact length of the string we're trying to append
48973 + */
48974 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
48975 + spin_unlock(&gr_learn_lock);
48976 + wake_up_interruptible(&learn_wait);
48977 + return;
48978 + }
48979 + if (learn_buffer == NULL) {
48980 + spin_unlock(&gr_learn_lock);
48981 + return;
48982 + }
48983 +
48984 + va_start(args, fmt);
48985 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
48986 + va_end(args);
48987 +
48988 + learn_buffer_len += len + 1;
48989 +
48990 + spin_unlock(&gr_learn_lock);
48991 + wake_up_interruptible(&learn_wait);
48992 +
48993 + return;
48994 +}
48995 +
48996 +static int
48997 +open_learn(struct inode *inode, struct file *file)
48998 +{
48999 + if (file->f_mode & FMODE_READ && gr_learn_attached)
49000 + return -EBUSY;
49001 + if (file->f_mode & FMODE_READ) {
49002 + int retval = 0;
49003 + mutex_lock(&gr_learn_user_mutex);
49004 + if (learn_buffer == NULL)
49005 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
49006 + if (learn_buffer_user == NULL)
49007 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
49008 + if (learn_buffer == NULL) {
49009 + retval = -ENOMEM;
49010 + goto out_error;
49011 + }
49012 + if (learn_buffer_user == NULL) {
49013 + retval = -ENOMEM;
49014 + goto out_error;
49015 + }
49016 + learn_buffer_len = 0;
49017 + learn_buffer_user_len = 0;
49018 + gr_learn_attached = 1;
49019 +out_error:
49020 + mutex_unlock(&gr_learn_user_mutex);
49021 + return retval;
49022 + }
49023 + return 0;
49024 +}
49025 +
49026 +static int
49027 +close_learn(struct inode *inode, struct file *file)
49028 +{
49029 + char *tmp;
49030 +
49031 + if (file->f_mode & FMODE_READ) {
49032 + mutex_lock(&gr_learn_user_mutex);
49033 + if (learn_buffer != NULL) {
49034 + spin_lock(&gr_learn_lock);
49035 + tmp = learn_buffer;
49036 + learn_buffer = NULL;
49037 + spin_unlock(&gr_learn_lock);
49038 + vfree(tmp);
49039 + }
49040 + if (learn_buffer_user != NULL) {
49041 + vfree(learn_buffer_user);
49042 + learn_buffer_user = NULL;
49043 + }
49044 + learn_buffer_len = 0;
49045 + learn_buffer_user_len = 0;
49046 + gr_learn_attached = 0;
49047 + mutex_unlock(&gr_learn_user_mutex);
49048 + }
49049 +
49050 + return 0;
49051 +}
49052 +
49053 +const struct file_operations grsec_fops = {
49054 + .read = read_learn,
49055 + .write = write_grsec_handler,
49056 + .open = open_learn,
49057 + .release = close_learn,
49058 + .poll = poll_learn,
49059 +};
49060 diff -urNp linux-2.6.32.42/grsecurity/gracl_res.c linux-2.6.32.42/grsecurity/gracl_res.c
49061 --- linux-2.6.32.42/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
49062 +++ linux-2.6.32.42/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
49063 @@ -0,0 +1,67 @@
49064 +#include <linux/kernel.h>
49065 +#include <linux/sched.h>
49066 +#include <linux/gracl.h>
49067 +#include <linux/grinternal.h>
49068 +
49069 +static const char *restab_log[] = {
49070 + [RLIMIT_CPU] = "RLIMIT_CPU",
49071 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
49072 + [RLIMIT_DATA] = "RLIMIT_DATA",
49073 + [RLIMIT_STACK] = "RLIMIT_STACK",
49074 + [RLIMIT_CORE] = "RLIMIT_CORE",
49075 + [RLIMIT_RSS] = "RLIMIT_RSS",
49076 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
49077 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
49078 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
49079 + [RLIMIT_AS] = "RLIMIT_AS",
49080 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
49081 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
49082 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
49083 + [RLIMIT_NICE] = "RLIMIT_NICE",
49084 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
49085 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
49086 + [GR_CRASH_RES] = "RLIMIT_CRASH"
49087 +};
49088 +
49089 +void
49090 +gr_log_resource(const struct task_struct *task,
49091 + const int res, const unsigned long wanted, const int gt)
49092 +{
49093 + const struct cred *cred;
49094 + unsigned long rlim;
49095 +
49096 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
49097 + return;
49098 +
49099 + // not yet supported resource
49100 + if (unlikely(!restab_log[res]))
49101 + return;
49102 +
49103 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
49104 + rlim = task->signal->rlim[res].rlim_max;
49105 + else
49106 + rlim = task->signal->rlim[res].rlim_cur;
49107 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
49108 + return;
49109 +
49110 + rcu_read_lock();
49111 + cred = __task_cred(task);
49112 +
49113 + if (res == RLIMIT_NPROC &&
49114 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
49115 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
49116 + goto out_rcu_unlock;
49117 + else if (res == RLIMIT_MEMLOCK &&
49118 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
49119 + goto out_rcu_unlock;
49120 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
49121 + goto out_rcu_unlock;
49122 + rcu_read_unlock();
49123 +
49124 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
49125 +
49126 + return;
49127 +out_rcu_unlock:
49128 + rcu_read_unlock();
49129 + return;
49130 +}
49131 diff -urNp linux-2.6.32.42/grsecurity/gracl_segv.c linux-2.6.32.42/grsecurity/gracl_segv.c
49132 --- linux-2.6.32.42/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
49133 +++ linux-2.6.32.42/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
49134 @@ -0,0 +1,284 @@
49135 +#include <linux/kernel.h>
49136 +#include <linux/mm.h>
49137 +#include <asm/uaccess.h>
49138 +#include <asm/errno.h>
49139 +#include <asm/mman.h>
49140 +#include <net/sock.h>
49141 +#include <linux/file.h>
49142 +#include <linux/fs.h>
49143 +#include <linux/net.h>
49144 +#include <linux/in.h>
49145 +#include <linux/smp_lock.h>
49146 +#include <linux/slab.h>
49147 +#include <linux/types.h>
49148 +#include <linux/sched.h>
49149 +#include <linux/timer.h>
49150 +#include <linux/gracl.h>
49151 +#include <linux/grsecurity.h>
49152 +#include <linux/grinternal.h>
49153 +
49154 +static struct crash_uid *uid_set;
49155 +static unsigned short uid_used;
49156 +static DEFINE_SPINLOCK(gr_uid_lock);
49157 +extern rwlock_t gr_inode_lock;
49158 +extern struct acl_subject_label *
49159 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
49160 + struct acl_role_label *role);
49161 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
49162 +
49163 +int
49164 +gr_init_uidset(void)
49165 +{
49166 + uid_set =
49167 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
49168 + uid_used = 0;
49169 +
49170 + return uid_set ? 1 : 0;
49171 +}
49172 +
49173 +void
49174 +gr_free_uidset(void)
49175 +{
49176 + if (uid_set)
49177 + kfree(uid_set);
49178 +
49179 + return;
49180 +}
49181 +
49182 +int
49183 +gr_find_uid(const uid_t uid)
49184 +{
49185 + struct crash_uid *tmp = uid_set;
49186 + uid_t buid;
49187 + int low = 0, high = uid_used - 1, mid;
49188 +
49189 + while (high >= low) {
49190 + mid = (low + high) >> 1;
49191 + buid = tmp[mid].uid;
49192 + if (buid == uid)
49193 + return mid;
49194 + if (buid > uid)
49195 + high = mid - 1;
49196 + if (buid < uid)
49197 + low = mid + 1;
49198 + }
49199 +
49200 + return -1;
49201 +}
49202 +
49203 +static __inline__ void
49204 +gr_insertsort(void)
49205 +{
49206 + unsigned short i, j;
49207 + struct crash_uid index;
49208 +
49209 + for (i = 1; i < uid_used; i++) {
49210 + index = uid_set[i];
49211 + j = i;
49212 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
49213 + uid_set[j] = uid_set[j - 1];
49214 + j--;
49215 + }
49216 + uid_set[j] = index;
49217 + }
49218 +
49219 + return;
49220 +}
49221 +
49222 +static __inline__ void
49223 +gr_insert_uid(const uid_t uid, const unsigned long expires)
49224 +{
49225 + int loc;
49226 +
49227 + if (uid_used == GR_UIDTABLE_MAX)
49228 + return;
49229 +
49230 + loc = gr_find_uid(uid);
49231 +
49232 + if (loc >= 0) {
49233 + uid_set[loc].expires = expires;
49234 + return;
49235 + }
49236 +
49237 + uid_set[uid_used].uid = uid;
49238 + uid_set[uid_used].expires = expires;
49239 + uid_used++;
49240 +
49241 + gr_insertsort();
49242 +
49243 + return;
49244 +}
49245 +
49246 +void
49247 +gr_remove_uid(const unsigned short loc)
49248 +{
49249 + unsigned short i;
49250 +
49251 + for (i = loc + 1; i < uid_used; i++)
49252 + uid_set[i - 1] = uid_set[i];
49253 +
49254 + uid_used--;
49255 +
49256 + return;
49257 +}
49258 +
49259 +int
49260 +gr_check_crash_uid(const uid_t uid)
49261 +{
49262 + int loc;
49263 + int ret = 0;
49264 +
49265 + if (unlikely(!gr_acl_is_enabled()))
49266 + return 0;
49267 +
49268 + spin_lock(&gr_uid_lock);
49269 + loc = gr_find_uid(uid);
49270 +
49271 + if (loc < 0)
49272 + goto out_unlock;
49273 +
49274 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
49275 + gr_remove_uid(loc);
49276 + else
49277 + ret = 1;
49278 +
49279 +out_unlock:
49280 + spin_unlock(&gr_uid_lock);
49281 + return ret;
49282 +}
49283 +
49284 +static __inline__ int
49285 +proc_is_setxid(const struct cred *cred)
49286 +{
49287 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
49288 + cred->uid != cred->fsuid)
49289 + return 1;
49290 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
49291 + cred->gid != cred->fsgid)
49292 + return 1;
49293 +
49294 + return 0;
49295 +}
49296 +
49297 +void
49298 +gr_handle_crash(struct task_struct *task, const int sig)
49299 +{
49300 + struct acl_subject_label *curr;
49301 + struct acl_subject_label *curr2;
49302 + struct task_struct *tsk, *tsk2;
49303 + const struct cred *cred;
49304 + const struct cred *cred2;
49305 +
49306 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
49307 + return;
49308 +
49309 + if (unlikely(!gr_acl_is_enabled()))
49310 + return;
49311 +
49312 + curr = task->acl;
49313 +
49314 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
49315 + return;
49316 +
49317 + if (time_before_eq(curr->expires, get_seconds())) {
49318 + curr->expires = 0;
49319 + curr->crashes = 0;
49320 + }
49321 +
49322 + curr->crashes++;
49323 +
49324 + if (!curr->expires)
49325 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
49326 +
49327 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49328 + time_after(curr->expires, get_seconds())) {
49329 + rcu_read_lock();
49330 + cred = __task_cred(task);
49331 + if (cred->uid && proc_is_setxid(cred)) {
49332 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49333 + spin_lock(&gr_uid_lock);
49334 + gr_insert_uid(cred->uid, curr->expires);
49335 + spin_unlock(&gr_uid_lock);
49336 + curr->expires = 0;
49337 + curr->crashes = 0;
49338 + read_lock(&tasklist_lock);
49339 + do_each_thread(tsk2, tsk) {
49340 + cred2 = __task_cred(tsk);
49341 + if (tsk != task && cred2->uid == cred->uid)
49342 + gr_fake_force_sig(SIGKILL, tsk);
49343 + } while_each_thread(tsk2, tsk);
49344 + read_unlock(&tasklist_lock);
49345 + } else {
49346 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49347 + read_lock(&tasklist_lock);
49348 + do_each_thread(tsk2, tsk) {
49349 + if (likely(tsk != task)) {
49350 + curr2 = tsk->acl;
49351 +
49352 + if (curr2->device == curr->device &&
49353 + curr2->inode == curr->inode)
49354 + gr_fake_force_sig(SIGKILL, tsk);
49355 + }
49356 + } while_each_thread(tsk2, tsk);
49357 + read_unlock(&tasklist_lock);
49358 + }
49359 + rcu_read_unlock();
49360 + }
49361 +
49362 + return;
49363 +}
49364 +
49365 +int
49366 +gr_check_crash_exec(const struct file *filp)
49367 +{
49368 + struct acl_subject_label *curr;
49369 +
49370 + if (unlikely(!gr_acl_is_enabled()))
49371 + return 0;
49372 +
49373 + read_lock(&gr_inode_lock);
49374 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
49375 + filp->f_path.dentry->d_inode->i_sb->s_dev,
49376 + current->role);
49377 + read_unlock(&gr_inode_lock);
49378 +
49379 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
49380 + (!curr->crashes && !curr->expires))
49381 + return 0;
49382 +
49383 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49384 + time_after(curr->expires, get_seconds()))
49385 + return 1;
49386 + else if (time_before_eq(curr->expires, get_seconds())) {
49387 + curr->crashes = 0;
49388 + curr->expires = 0;
49389 + }
49390 +
49391 + return 0;
49392 +}
49393 +
49394 +void
49395 +gr_handle_alertkill(struct task_struct *task)
49396 +{
49397 + struct acl_subject_label *curracl;
49398 + __u32 curr_ip;
49399 + struct task_struct *p, *p2;
49400 +
49401 + if (unlikely(!gr_acl_is_enabled()))
49402 + return;
49403 +
49404 + curracl = task->acl;
49405 + curr_ip = task->signal->curr_ip;
49406 +
49407 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
49408 + read_lock(&tasklist_lock);
49409 + do_each_thread(p2, p) {
49410 + if (p->signal->curr_ip == curr_ip)
49411 + gr_fake_force_sig(SIGKILL, p);
49412 + } while_each_thread(p2, p);
49413 + read_unlock(&tasklist_lock);
49414 + } else if (curracl->mode & GR_KILLPROC)
49415 + gr_fake_force_sig(SIGKILL, task);
49416 +
49417 + return;
49418 +}
49419 diff -urNp linux-2.6.32.42/grsecurity/gracl_shm.c linux-2.6.32.42/grsecurity/gracl_shm.c
49420 --- linux-2.6.32.42/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
49421 +++ linux-2.6.32.42/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
49422 @@ -0,0 +1,40 @@
49423 +#include <linux/kernel.h>
49424 +#include <linux/mm.h>
49425 +#include <linux/sched.h>
49426 +#include <linux/file.h>
49427 +#include <linux/ipc.h>
49428 +#include <linux/gracl.h>
49429 +#include <linux/grsecurity.h>
49430 +#include <linux/grinternal.h>
49431 +
49432 +int
49433 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49434 + const time_t shm_createtime, const uid_t cuid, const int shmid)
49435 +{
49436 + struct task_struct *task;
49437 +
49438 + if (!gr_acl_is_enabled())
49439 + return 1;
49440 +
49441 + rcu_read_lock();
49442 + read_lock(&tasklist_lock);
49443 +
49444 + task = find_task_by_vpid(shm_cprid);
49445 +
49446 + if (unlikely(!task))
49447 + task = find_task_by_vpid(shm_lapid);
49448 +
49449 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
49450 + (task->pid == shm_lapid)) &&
49451 + (task->acl->mode & GR_PROTSHM) &&
49452 + (task->acl != current->acl))) {
49453 + read_unlock(&tasklist_lock);
49454 + rcu_read_unlock();
49455 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
49456 + return 0;
49457 + }
49458 + read_unlock(&tasklist_lock);
49459 + rcu_read_unlock();
49460 +
49461 + return 1;
49462 +}
49463 diff -urNp linux-2.6.32.42/grsecurity/grsec_chdir.c linux-2.6.32.42/grsecurity/grsec_chdir.c
49464 --- linux-2.6.32.42/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
49465 +++ linux-2.6.32.42/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
49466 @@ -0,0 +1,19 @@
49467 +#include <linux/kernel.h>
49468 +#include <linux/sched.h>
49469 +#include <linux/fs.h>
49470 +#include <linux/file.h>
49471 +#include <linux/grsecurity.h>
49472 +#include <linux/grinternal.h>
49473 +
49474 +void
49475 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
49476 +{
49477 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49478 + if ((grsec_enable_chdir && grsec_enable_group &&
49479 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
49480 + !grsec_enable_group)) {
49481 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
49482 + }
49483 +#endif
49484 + return;
49485 +}
49486 diff -urNp linux-2.6.32.42/grsecurity/grsec_chroot.c linux-2.6.32.42/grsecurity/grsec_chroot.c
49487 --- linux-2.6.32.42/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
49488 +++ linux-2.6.32.42/grsecurity/grsec_chroot.c 2011-06-20 19:44:00.000000000 -0400
49489 @@ -0,0 +1,395 @@
49490 +#include <linux/kernel.h>
49491 +#include <linux/module.h>
49492 +#include <linux/sched.h>
49493 +#include <linux/file.h>
49494 +#include <linux/fs.h>
49495 +#include <linux/mount.h>
49496 +#include <linux/types.h>
49497 +#include <linux/pid_namespace.h>
49498 +#include <linux/grsecurity.h>
49499 +#include <linux/grinternal.h>
49500 +
49501 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
49502 +{
49503 +#ifdef CONFIG_GRKERNSEC
49504 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
49505 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
49506 + task->gr_is_chrooted = 1;
49507 + else
49508 + task->gr_is_chrooted = 0;
49509 +
49510 + task->gr_chroot_dentry = path->dentry;
49511 +#endif
49512 + return;
49513 +}
49514 +
49515 +void gr_clear_chroot_entries(struct task_struct *task)
49516 +{
49517 +#ifdef CONFIG_GRKERNSEC
49518 + task->gr_is_chrooted = 0;
49519 + task->gr_chroot_dentry = NULL;
49520 +#endif
49521 + return;
49522 +}
49523 +
49524 +int
49525 +gr_handle_chroot_unix(const pid_t pid)
49526 +{
49527 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49528 + struct pid *spid = NULL;
49529 +
49530 + if (unlikely(!grsec_enable_chroot_unix))
49531 + return 1;
49532 +
49533 + if (likely(!proc_is_chrooted(current)))
49534 + return 1;
49535 +
49536 + rcu_read_lock();
49537 + read_lock(&tasklist_lock);
49538 +
49539 + spid = find_vpid(pid);
49540 + if (spid) {
49541 + struct task_struct *p;
49542 + p = pid_task(spid, PIDTYPE_PID);
49543 + if (unlikely(p && !have_same_root(current, p))) {
49544 + read_unlock(&tasklist_lock);
49545 + rcu_read_unlock();
49546 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
49547 + return 0;
49548 + }
49549 + }
49550 + read_unlock(&tasklist_lock);
49551 + rcu_read_unlock();
49552 +#endif
49553 + return 1;
49554 +}
49555 +
49556 +int
49557 +gr_handle_chroot_nice(void)
49558 +{
49559 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49560 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
49561 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
49562 + return -EPERM;
49563 + }
49564 +#endif
49565 + return 0;
49566 +}
49567 +
49568 +int
49569 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
49570 +{
49571 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49572 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
49573 + && proc_is_chrooted(current)) {
49574 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
49575 + return -EACCES;
49576 + }
49577 +#endif
49578 + return 0;
49579 +}
49580 +
49581 +int
49582 +gr_handle_chroot_rawio(const struct inode *inode)
49583 +{
49584 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49585 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
49586 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
49587 + return 1;
49588 +#endif
49589 + return 0;
49590 +}
49591 +
49592 +int
49593 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
49594 +{
49595 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49596 + struct task_struct *p;
49597 + int ret = 0;
49598 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
49599 + return ret;
49600 +
49601 + read_lock(&tasklist_lock);
49602 + do_each_pid_task(pid, type, p) {
49603 + if (!have_same_root(current, p)) {
49604 + ret = 1;
49605 + goto out;
49606 + }
49607 + } while_each_pid_task(pid, type, p);
49608 +out:
49609 + read_unlock(&tasklist_lock);
49610 + return ret;
49611 +#endif
49612 + return 0;
49613 +}
49614 +
49615 +int
49616 +gr_pid_is_chrooted(struct task_struct *p)
49617 +{
49618 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49619 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
49620 + return 0;
49621 +
49622 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
49623 + !have_same_root(current, p)) {
49624 + return 1;
49625 + }
49626 +#endif
49627 + return 0;
49628 +}
49629 +
49630 +EXPORT_SYMBOL(gr_pid_is_chrooted);
49631 +
49632 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
49633 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
49634 +{
49635 + struct dentry *dentry = (struct dentry *)u_dentry;
49636 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
49637 + struct dentry *realroot;
49638 + struct vfsmount *realrootmnt;
49639 + struct dentry *currentroot;
49640 + struct vfsmount *currentmnt;
49641 + struct task_struct *reaper = &init_task;
49642 + int ret = 1;
49643 +
49644 + read_lock(&reaper->fs->lock);
49645 + realrootmnt = mntget(reaper->fs->root.mnt);
49646 + realroot = dget(reaper->fs->root.dentry);
49647 + read_unlock(&reaper->fs->lock);
49648 +
49649 + read_lock(&current->fs->lock);
49650 + currentmnt = mntget(current->fs->root.mnt);
49651 + currentroot = dget(current->fs->root.dentry);
49652 + read_unlock(&current->fs->lock);
49653 +
49654 + spin_lock(&dcache_lock);
49655 + for (;;) {
49656 + if (unlikely((dentry == realroot && mnt == realrootmnt)
49657 + || (dentry == currentroot && mnt == currentmnt)))
49658 + break;
49659 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
49660 + if (mnt->mnt_parent == mnt)
49661 + break;
49662 + dentry = mnt->mnt_mountpoint;
49663 + mnt = mnt->mnt_parent;
49664 + continue;
49665 + }
49666 + dentry = dentry->d_parent;
49667 + }
49668 + spin_unlock(&dcache_lock);
49669 +
49670 + dput(currentroot);
49671 + mntput(currentmnt);
49672 +
49673 + /* access is outside of chroot */
49674 + if (dentry == realroot && mnt == realrootmnt)
49675 + ret = 0;
49676 +
49677 + dput(realroot);
49678 + mntput(realrootmnt);
49679 + return ret;
49680 +}
49681 +#endif
49682 +
49683 +int
49684 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
49685 +{
49686 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49687 + if (!grsec_enable_chroot_fchdir)
49688 + return 1;
49689 +
49690 + if (!proc_is_chrooted(current))
49691 + return 1;
49692 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
49693 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
49694 + return 0;
49695 + }
49696 +#endif
49697 + return 1;
49698 +}
49699 +
49700 +int
49701 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49702 + const time_t shm_createtime)
49703 +{
49704 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49705 + struct pid *pid = NULL;
49706 + time_t starttime;
49707 +
49708 + if (unlikely(!grsec_enable_chroot_shmat))
49709 + return 1;
49710 +
49711 + if (likely(!proc_is_chrooted(current)))
49712 + return 1;
49713 +
49714 + rcu_read_lock();
49715 + read_lock(&tasklist_lock);
49716 +
49717 + pid = find_vpid(shm_cprid);
49718 + if (pid) {
49719 + struct task_struct *p;
49720 + p = pid_task(pid, PIDTYPE_PID);
49721 + if (p == NULL)
49722 + goto unlock;
49723 + starttime = p->start_time.tv_sec;
49724 + if (unlikely(!have_same_root(current, p) &&
49725 + time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime))) {
49726 + read_unlock(&tasklist_lock);
49727 + rcu_read_unlock();
49728 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
49729 + return 0;
49730 + }
49731 + } else {
49732 + pid = find_vpid(shm_lapid);
49733 + if (pid) {
49734 + struct task_struct *p;
49735 + p = pid_task(pid, PIDTYPE_PID);
49736 + if (p == NULL)
49737 + goto unlock;
49738 + if (unlikely(!have_same_root(current, p))) {
49739 + read_unlock(&tasklist_lock);
49740 + rcu_read_unlock();
49741 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
49742 + return 0;
49743 + }
49744 + }
49745 + }
49746 +
49747 +unlock:
49748 + read_unlock(&tasklist_lock);
49749 + rcu_read_unlock();
49750 +#endif
49751 + return 1;
49752 +}
49753 +
49754 +void
49755 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
49756 +{
49757 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49758 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
49759 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
49760 +#endif
49761 + return;
49762 +}
49763 +
49764 +int
49765 +gr_handle_chroot_mknod(const struct dentry *dentry,
49766 + const struct vfsmount *mnt, const int mode)
49767 +{
49768 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49769 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
49770 + proc_is_chrooted(current)) {
49771 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
49772 + return -EPERM;
49773 + }
49774 +#endif
49775 + return 0;
49776 +}
49777 +
49778 +int
49779 +gr_handle_chroot_mount(const struct dentry *dentry,
49780 + const struct vfsmount *mnt, const char *dev_name)
49781 +{
49782 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49783 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
49784 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
49785 + return -EPERM;
49786 + }
49787 +#endif
49788 + return 0;
49789 +}
49790 +
49791 +int
49792 +gr_handle_chroot_pivot(void)
49793 +{
49794 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49795 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
49796 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
49797 + return -EPERM;
49798 + }
49799 +#endif
49800 + return 0;
49801 +}
49802 +
49803 +int
49804 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
49805 +{
49806 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49807 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
49808 + !gr_is_outside_chroot(dentry, mnt)) {
49809 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
49810 + return -EPERM;
49811 + }
49812 +#endif
49813 + return 0;
49814 +}
49815 +
49816 +int
49817 +gr_handle_chroot_caps(struct path *path)
49818 +{
49819 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49820 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
49821 + (init_task.fs->root.dentry != path->dentry) &&
49822 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
49823 +
49824 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
49825 + const struct cred *old = current_cred();
49826 + struct cred *new = prepare_creds();
49827 + if (new == NULL)
49828 + return 1;
49829 +
49830 + new->cap_permitted = cap_drop(old->cap_permitted,
49831 + chroot_caps);
49832 + new->cap_inheritable = cap_drop(old->cap_inheritable,
49833 + chroot_caps);
49834 + new->cap_effective = cap_drop(old->cap_effective,
49835 + chroot_caps);
49836 +
49837 + commit_creds(new);
49838 +
49839 + return 0;
49840 + }
49841 +#endif
49842 + return 0;
49843 +}
49844 +
49845 +int
49846 +gr_handle_chroot_sysctl(const int op)
49847 +{
49848 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49849 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
49850 + && (op & MAY_WRITE))
49851 + return -EACCES;
49852 +#endif
49853 + return 0;
49854 +}
49855 +
49856 +void
49857 +gr_handle_chroot_chdir(struct path *path)
49858 +{
49859 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49860 + if (grsec_enable_chroot_chdir)
49861 + set_fs_pwd(current->fs, path);
49862 +#endif
49863 + return;
49864 +}
49865 +
49866 +int
49867 +gr_handle_chroot_chmod(const struct dentry *dentry,
49868 + const struct vfsmount *mnt, const int mode)
49869 +{
49870 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49871 + /* allow chmod +s on directories, but not on files */
49872 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
49873 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
49874 + proc_is_chrooted(current)) {
49875 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
49876 + return -EPERM;
49877 + }
49878 +#endif
49879 + return 0;
49880 +}
49881 +
49882 +#ifdef CONFIG_SECURITY
49883 +EXPORT_SYMBOL(gr_handle_chroot_caps);
49884 +#endif
49885 diff -urNp linux-2.6.32.42/grsecurity/grsec_disabled.c linux-2.6.32.42/grsecurity/grsec_disabled.c
49886 --- linux-2.6.32.42/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
49887 +++ linux-2.6.32.42/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
49888 @@ -0,0 +1,447 @@
49889 +#include <linux/kernel.h>
49890 +#include <linux/module.h>
49891 +#include <linux/sched.h>
49892 +#include <linux/file.h>
49893 +#include <linux/fs.h>
49894 +#include <linux/kdev_t.h>
49895 +#include <linux/net.h>
49896 +#include <linux/in.h>
49897 +#include <linux/ip.h>
49898 +#include <linux/skbuff.h>
49899 +#include <linux/sysctl.h>
49900 +
49901 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
49902 +void
49903 +pax_set_initial_flags(struct linux_binprm *bprm)
49904 +{
49905 + return;
49906 +}
49907 +#endif
49908 +
49909 +#ifdef CONFIG_SYSCTL
49910 +__u32
49911 +gr_handle_sysctl(const struct ctl_table * table, const int op)
49912 +{
49913 + return 0;
49914 +}
49915 +#endif
49916 +
49917 +#ifdef CONFIG_TASKSTATS
49918 +int gr_is_taskstats_denied(int pid)
49919 +{
49920 + return 0;
49921 +}
49922 +#endif
49923 +
49924 +int
49925 +gr_acl_is_enabled(void)
49926 +{
49927 + return 0;
49928 +}
49929 +
49930 +int
49931 +gr_handle_rawio(const struct inode *inode)
49932 +{
49933 + return 0;
49934 +}
49935 +
49936 +void
49937 +gr_acl_handle_psacct(struct task_struct *task, const long code)
49938 +{
49939 + return;
49940 +}
49941 +
49942 +int
49943 +gr_handle_ptrace(struct task_struct *task, const long request)
49944 +{
49945 + return 0;
49946 +}
49947 +
49948 +int
49949 +gr_handle_proc_ptrace(struct task_struct *task)
49950 +{
49951 + return 0;
49952 +}
49953 +
49954 +void
49955 +gr_learn_resource(const struct task_struct *task,
49956 + const int res, const unsigned long wanted, const int gt)
49957 +{
49958 + return;
49959 +}
49960 +
49961 +int
49962 +gr_set_acls(const int type)
49963 +{
49964 + return 0;
49965 +}
49966 +
49967 +int
49968 +gr_check_hidden_task(const struct task_struct *tsk)
49969 +{
49970 + return 0;
49971 +}
49972 +
49973 +int
49974 +gr_check_protected_task(const struct task_struct *task)
49975 +{
49976 + return 0;
49977 +}
49978 +
49979 +int
49980 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49981 +{
49982 + return 0;
49983 +}
49984 +
49985 +void
49986 +gr_copy_label(struct task_struct *tsk)
49987 +{
49988 + return;
49989 +}
49990 +
49991 +void
49992 +gr_set_pax_flags(struct task_struct *task)
49993 +{
49994 + return;
49995 +}
49996 +
49997 +int
49998 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49999 + const int unsafe_share)
50000 +{
50001 + return 0;
50002 +}
50003 +
50004 +void
50005 +gr_handle_delete(const ino_t ino, const dev_t dev)
50006 +{
50007 + return;
50008 +}
50009 +
50010 +void
50011 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50012 +{
50013 + return;
50014 +}
50015 +
50016 +void
50017 +gr_handle_crash(struct task_struct *task, const int sig)
50018 +{
50019 + return;
50020 +}
50021 +
50022 +int
50023 +gr_check_crash_exec(const struct file *filp)
50024 +{
50025 + return 0;
50026 +}
50027 +
50028 +int
50029 +gr_check_crash_uid(const uid_t uid)
50030 +{
50031 + return 0;
50032 +}
50033 +
50034 +void
50035 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50036 + struct dentry *old_dentry,
50037 + struct dentry *new_dentry,
50038 + struct vfsmount *mnt, const __u8 replace)
50039 +{
50040 + return;
50041 +}
50042 +
50043 +int
50044 +gr_search_socket(const int family, const int type, const int protocol)
50045 +{
50046 + return 1;
50047 +}
50048 +
50049 +int
50050 +gr_search_connectbind(const int mode, const struct socket *sock,
50051 + const struct sockaddr_in *addr)
50052 +{
50053 + return 0;
50054 +}
50055 +
50056 +int
50057 +gr_is_capable(const int cap)
50058 +{
50059 + return 1;
50060 +}
50061 +
50062 +int
50063 +gr_is_capable_nolog(const int cap)
50064 +{
50065 + return 1;
50066 +}
50067 +
50068 +void
50069 +gr_handle_alertkill(struct task_struct *task)
50070 +{
50071 + return;
50072 +}
50073 +
50074 +__u32
50075 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
50076 +{
50077 + return 1;
50078 +}
50079 +
50080 +__u32
50081 +gr_acl_handle_hidden_file(const struct dentry * dentry,
50082 + const struct vfsmount * mnt)
50083 +{
50084 + return 1;
50085 +}
50086 +
50087 +__u32
50088 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50089 + const int fmode)
50090 +{
50091 + return 1;
50092 +}
50093 +
50094 +__u32
50095 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
50096 +{
50097 + return 1;
50098 +}
50099 +
50100 +__u32
50101 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
50102 +{
50103 + return 1;
50104 +}
50105 +
50106 +int
50107 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
50108 + unsigned int *vm_flags)
50109 +{
50110 + return 1;
50111 +}
50112 +
50113 +__u32
50114 +gr_acl_handle_truncate(const struct dentry * dentry,
50115 + const struct vfsmount * mnt)
50116 +{
50117 + return 1;
50118 +}
50119 +
50120 +__u32
50121 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
50122 +{
50123 + return 1;
50124 +}
50125 +
50126 +__u32
50127 +gr_acl_handle_access(const struct dentry * dentry,
50128 + const struct vfsmount * mnt, const int fmode)
50129 +{
50130 + return 1;
50131 +}
50132 +
50133 +__u32
50134 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
50135 + mode_t mode)
50136 +{
50137 + return 1;
50138 +}
50139 +
50140 +__u32
50141 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
50142 + mode_t mode)
50143 +{
50144 + return 1;
50145 +}
50146 +
50147 +__u32
50148 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
50149 +{
50150 + return 1;
50151 +}
50152 +
50153 +__u32
50154 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
50155 +{
50156 + return 1;
50157 +}
50158 +
50159 +void
50160 +grsecurity_init(void)
50161 +{
50162 + return;
50163 +}
50164 +
50165 +__u32
50166 +gr_acl_handle_mknod(const struct dentry * new_dentry,
50167 + const struct dentry * parent_dentry,
50168 + const struct vfsmount * parent_mnt,
50169 + const int mode)
50170 +{
50171 + return 1;
50172 +}
50173 +
50174 +__u32
50175 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
50176 + const struct dentry * parent_dentry,
50177 + const struct vfsmount * parent_mnt)
50178 +{
50179 + return 1;
50180 +}
50181 +
50182 +__u32
50183 +gr_acl_handle_symlink(const struct dentry * new_dentry,
50184 + const struct dentry * parent_dentry,
50185 + const struct vfsmount * parent_mnt, const char *from)
50186 +{
50187 + return 1;
50188 +}
50189 +
50190 +__u32
50191 +gr_acl_handle_link(const struct dentry * new_dentry,
50192 + const struct dentry * parent_dentry,
50193 + const struct vfsmount * parent_mnt,
50194 + const struct dentry * old_dentry,
50195 + const struct vfsmount * old_mnt, const char *to)
50196 +{
50197 + return 1;
50198 +}
50199 +
50200 +int
50201 +gr_acl_handle_rename(const struct dentry *new_dentry,
50202 + const struct dentry *parent_dentry,
50203 + const struct vfsmount *parent_mnt,
50204 + const struct dentry *old_dentry,
50205 + const struct inode *old_parent_inode,
50206 + const struct vfsmount *old_mnt, const char *newname)
50207 +{
50208 + return 0;
50209 +}
50210 +
50211 +int
50212 +gr_acl_handle_filldir(const struct file *file, const char *name,
50213 + const int namelen, const ino_t ino)
50214 +{
50215 + return 1;
50216 +}
50217 +
50218 +int
50219 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50220 + const time_t shm_createtime, const uid_t cuid, const int shmid)
50221 +{
50222 + return 1;
50223 +}
50224 +
50225 +int
50226 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
50227 +{
50228 + return 0;
50229 +}
50230 +
50231 +int
50232 +gr_search_accept(const struct socket *sock)
50233 +{
50234 + return 0;
50235 +}
50236 +
50237 +int
50238 +gr_search_listen(const struct socket *sock)
50239 +{
50240 + return 0;
50241 +}
50242 +
50243 +int
50244 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
50245 +{
50246 + return 0;
50247 +}
50248 +
50249 +__u32
50250 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
50251 +{
50252 + return 1;
50253 +}
50254 +
50255 +__u32
50256 +gr_acl_handle_creat(const struct dentry * dentry,
50257 + const struct dentry * p_dentry,
50258 + const struct vfsmount * p_mnt, const int fmode,
50259 + const int imode)
50260 +{
50261 + return 1;
50262 +}
50263 +
50264 +void
50265 +gr_acl_handle_exit(void)
50266 +{
50267 + return;
50268 +}
50269 +
50270 +int
50271 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50272 +{
50273 + return 1;
50274 +}
50275 +
50276 +void
50277 +gr_set_role_label(const uid_t uid, const gid_t gid)
50278 +{
50279 + return;
50280 +}
50281 +
50282 +int
50283 +gr_acl_handle_procpidmem(const struct task_struct *task)
50284 +{
50285 + return 0;
50286 +}
50287 +
50288 +int
50289 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
50290 +{
50291 + return 0;
50292 +}
50293 +
50294 +int
50295 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
50296 +{
50297 + return 0;
50298 +}
50299 +
50300 +void
50301 +gr_set_kernel_label(struct task_struct *task)
50302 +{
50303 + return;
50304 +}
50305 +
50306 +int
50307 +gr_check_user_change(int real, int effective, int fs)
50308 +{
50309 + return 0;
50310 +}
50311 +
50312 +int
50313 +gr_check_group_change(int real, int effective, int fs)
50314 +{
50315 + return 0;
50316 +}
50317 +
50318 +int gr_acl_enable_at_secure(void)
50319 +{
50320 + return 0;
50321 +}
50322 +
50323 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50324 +{
50325 + return dentry->d_inode->i_sb->s_dev;
50326 +}
50327 +
50328 +EXPORT_SYMBOL(gr_is_capable);
50329 +EXPORT_SYMBOL(gr_is_capable_nolog);
50330 +EXPORT_SYMBOL(gr_learn_resource);
50331 +EXPORT_SYMBOL(gr_set_kernel_label);
50332 +#ifdef CONFIG_SECURITY
50333 +EXPORT_SYMBOL(gr_check_user_change);
50334 +EXPORT_SYMBOL(gr_check_group_change);
50335 +#endif
50336 diff -urNp linux-2.6.32.42/grsecurity/grsec_exec.c linux-2.6.32.42/grsecurity/grsec_exec.c
50337 --- linux-2.6.32.42/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
50338 +++ linux-2.6.32.42/grsecurity/grsec_exec.c 2011-04-17 15:56:46.000000000 -0400
50339 @@ -0,0 +1,148 @@
50340 +#include <linux/kernel.h>
50341 +#include <linux/sched.h>
50342 +#include <linux/file.h>
50343 +#include <linux/binfmts.h>
50344 +#include <linux/smp_lock.h>
50345 +#include <linux/fs.h>
50346 +#include <linux/types.h>
50347 +#include <linux/grdefs.h>
50348 +#include <linux/grinternal.h>
50349 +#include <linux/capability.h>
50350 +#include <linux/compat.h>
50351 +
50352 +#include <asm/uaccess.h>
50353 +
50354 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50355 +static char gr_exec_arg_buf[132];
50356 +static DEFINE_MUTEX(gr_exec_arg_mutex);
50357 +#endif
50358 +
50359 +int
50360 +gr_handle_nproc(void)
50361 +{
50362 +#ifdef CONFIG_GRKERNSEC_EXECVE
50363 + const struct cred *cred = current_cred();
50364 + if (grsec_enable_execve && cred->user &&
50365 + (atomic_read(&cred->user->processes) >
50366 + current->signal->rlim[RLIMIT_NPROC].rlim_cur) &&
50367 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
50368 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
50369 + return -EAGAIN;
50370 + }
50371 +#endif
50372 + return 0;
50373 +}
50374 +
50375 +void
50376 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
50377 +{
50378 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50379 + char *grarg = gr_exec_arg_buf;
50380 + unsigned int i, x, execlen = 0;
50381 + char c;
50382 +
50383 + if (!((grsec_enable_execlog && grsec_enable_group &&
50384 + in_group_p(grsec_audit_gid))
50385 + || (grsec_enable_execlog && !grsec_enable_group)))
50386 + return;
50387 +
50388 + mutex_lock(&gr_exec_arg_mutex);
50389 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
50390 +
50391 + if (unlikely(argv == NULL))
50392 + goto log;
50393 +
50394 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
50395 + const char __user *p;
50396 + unsigned int len;
50397 +
50398 + if (copy_from_user(&p, argv + i, sizeof(p)))
50399 + goto log;
50400 + if (!p)
50401 + goto log;
50402 + len = strnlen_user(p, 128 - execlen);
50403 + if (len > 128 - execlen)
50404 + len = 128 - execlen;
50405 + else if (len > 0)
50406 + len--;
50407 + if (copy_from_user(grarg + execlen, p, len))
50408 + goto log;
50409 +
50410 + /* rewrite unprintable characters */
50411 + for (x = 0; x < len; x++) {
50412 + c = *(grarg + execlen + x);
50413 + if (c < 32 || c > 126)
50414 + *(grarg + execlen + x) = ' ';
50415 + }
50416 +
50417 + execlen += len;
50418 + *(grarg + execlen) = ' ';
50419 + *(grarg + execlen + 1) = '\0';
50420 + execlen++;
50421 + }
50422 +
50423 + log:
50424 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50425 + bprm->file->f_path.mnt, grarg);
50426 + mutex_unlock(&gr_exec_arg_mutex);
50427 +#endif
50428 + return;
50429 +}
50430 +
50431 +#ifdef CONFIG_COMPAT
50432 +void
50433 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
50434 +{
50435 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50436 + char *grarg = gr_exec_arg_buf;
50437 + unsigned int i, x, execlen = 0;
50438 + char c;
50439 +
50440 + if (!((grsec_enable_execlog && grsec_enable_group &&
50441 + in_group_p(grsec_audit_gid))
50442 + || (grsec_enable_execlog && !grsec_enable_group)))
50443 + return;
50444 +
50445 + mutex_lock(&gr_exec_arg_mutex);
50446 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
50447 +
50448 + if (unlikely(argv == NULL))
50449 + goto log;
50450 +
50451 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
50452 + compat_uptr_t p;
50453 + unsigned int len;
50454 +
50455 + if (get_user(p, argv + i))
50456 + goto log;
50457 + len = strnlen_user(compat_ptr(p), 128 - execlen);
50458 + if (len > 128 - execlen)
50459 + len = 128 - execlen;
50460 + else if (len > 0)
50461 + len--;
50462 + else
50463 + goto log;
50464 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
50465 + goto log;
50466 +
50467 + /* rewrite unprintable characters */
50468 + for (x = 0; x < len; x++) {
50469 + c = *(grarg + execlen + x);
50470 + if (c < 32 || c > 126)
50471 + *(grarg + execlen + x) = ' ';
50472 + }
50473 +
50474 + execlen += len;
50475 + *(grarg + execlen) = ' ';
50476 + *(grarg + execlen + 1) = '\0';
50477 + execlen++;
50478 + }
50479 +
50480 + log:
50481 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50482 + bprm->file->f_path.mnt, grarg);
50483 + mutex_unlock(&gr_exec_arg_mutex);
50484 +#endif
50485 + return;
50486 +}
50487 +#endif
50488 diff -urNp linux-2.6.32.42/grsecurity/grsec_fifo.c linux-2.6.32.42/grsecurity/grsec_fifo.c
50489 --- linux-2.6.32.42/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
50490 +++ linux-2.6.32.42/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
50491 @@ -0,0 +1,24 @@
50492 +#include <linux/kernel.h>
50493 +#include <linux/sched.h>
50494 +#include <linux/fs.h>
50495 +#include <linux/file.h>
50496 +#include <linux/grinternal.h>
50497 +
50498 +int
50499 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
50500 + const struct dentry *dir, const int flag, const int acc_mode)
50501 +{
50502 +#ifdef CONFIG_GRKERNSEC_FIFO
50503 + const struct cred *cred = current_cred();
50504 +
50505 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
50506 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
50507 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
50508 + (cred->fsuid != dentry->d_inode->i_uid)) {
50509 + if (!inode_permission(dentry->d_inode, acc_mode))
50510 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
50511 + return -EACCES;
50512 + }
50513 +#endif
50514 + return 0;
50515 +}
50516 diff -urNp linux-2.6.32.42/grsecurity/grsec_fork.c linux-2.6.32.42/grsecurity/grsec_fork.c
50517 --- linux-2.6.32.42/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
50518 +++ linux-2.6.32.42/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
50519 @@ -0,0 +1,23 @@
50520 +#include <linux/kernel.h>
50521 +#include <linux/sched.h>
50522 +#include <linux/grsecurity.h>
50523 +#include <linux/grinternal.h>
50524 +#include <linux/errno.h>
50525 +
50526 +void
50527 +gr_log_forkfail(const int retval)
50528 +{
50529 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
50530 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
50531 + switch (retval) {
50532 + case -EAGAIN:
50533 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
50534 + break;
50535 + case -ENOMEM:
50536 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
50537 + break;
50538 + }
50539 + }
50540 +#endif
50541 + return;
50542 +}
50543 diff -urNp linux-2.6.32.42/grsecurity/grsec_init.c linux-2.6.32.42/grsecurity/grsec_init.c
50544 --- linux-2.6.32.42/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
50545 +++ linux-2.6.32.42/grsecurity/grsec_init.c 2011-04-17 15:56:46.000000000 -0400
50546 @@ -0,0 +1,270 @@
50547 +#include <linux/kernel.h>
50548 +#include <linux/sched.h>
50549 +#include <linux/mm.h>
50550 +#include <linux/smp_lock.h>
50551 +#include <linux/gracl.h>
50552 +#include <linux/slab.h>
50553 +#include <linux/vmalloc.h>
50554 +#include <linux/percpu.h>
50555 +#include <linux/module.h>
50556 +
50557 +int grsec_enable_link;
50558 +int grsec_enable_dmesg;
50559 +int grsec_enable_harden_ptrace;
50560 +int grsec_enable_fifo;
50561 +int grsec_enable_execve;
50562 +int grsec_enable_execlog;
50563 +int grsec_enable_signal;
50564 +int grsec_enable_forkfail;
50565 +int grsec_enable_audit_ptrace;
50566 +int grsec_enable_time;
50567 +int grsec_enable_audit_textrel;
50568 +int grsec_enable_group;
50569 +int grsec_audit_gid;
50570 +int grsec_enable_chdir;
50571 +int grsec_enable_mount;
50572 +int grsec_enable_rofs;
50573 +int grsec_enable_chroot_findtask;
50574 +int grsec_enable_chroot_mount;
50575 +int grsec_enable_chroot_shmat;
50576 +int grsec_enable_chroot_fchdir;
50577 +int grsec_enable_chroot_double;
50578 +int grsec_enable_chroot_pivot;
50579 +int grsec_enable_chroot_chdir;
50580 +int grsec_enable_chroot_chmod;
50581 +int grsec_enable_chroot_mknod;
50582 +int grsec_enable_chroot_nice;
50583 +int grsec_enable_chroot_execlog;
50584 +int grsec_enable_chroot_caps;
50585 +int grsec_enable_chroot_sysctl;
50586 +int grsec_enable_chroot_unix;
50587 +int grsec_enable_tpe;
50588 +int grsec_tpe_gid;
50589 +int grsec_enable_blackhole;
50590 +#ifdef CONFIG_IPV6_MODULE
50591 +EXPORT_SYMBOL(grsec_enable_blackhole);
50592 +#endif
50593 +int grsec_lastack_retries;
50594 +int grsec_enable_tpe_all;
50595 +int grsec_enable_tpe_invert;
50596 +int grsec_enable_socket_all;
50597 +int grsec_socket_all_gid;
50598 +int grsec_enable_socket_client;
50599 +int grsec_socket_client_gid;
50600 +int grsec_enable_socket_server;
50601 +int grsec_socket_server_gid;
50602 +int grsec_resource_logging;
50603 +int grsec_disable_privio;
50604 +int grsec_enable_log_rwxmaps;
50605 +int grsec_lock;
50606 +
50607 +DEFINE_SPINLOCK(grsec_alert_lock);
50608 +unsigned long grsec_alert_wtime = 0;
50609 +unsigned long grsec_alert_fyet = 0;
50610 +
50611 +DEFINE_SPINLOCK(grsec_audit_lock);
50612 +
50613 +DEFINE_RWLOCK(grsec_exec_file_lock);
50614 +
50615 +char *gr_shared_page[4];
50616 +
50617 +char *gr_alert_log_fmt;
50618 +char *gr_audit_log_fmt;
50619 +char *gr_alert_log_buf;
50620 +char *gr_audit_log_buf;
50621 +
50622 +extern struct gr_arg *gr_usermode;
50623 +extern unsigned char *gr_system_salt;
50624 +extern unsigned char *gr_system_sum;
50625 +
50626 +void __init
50627 +grsecurity_init(void)
50628 +{
50629 + int j;
50630 + /* create the per-cpu shared pages */
50631 +
50632 +#ifdef CONFIG_X86
50633 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
50634 +#endif
50635 +
50636 + for (j = 0; j < 4; j++) {
50637 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
50638 + if (gr_shared_page[j] == NULL) {
50639 + panic("Unable to allocate grsecurity shared page");
50640 + return;
50641 + }
50642 + }
50643 +
50644 + /* allocate log buffers */
50645 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
50646 + if (!gr_alert_log_fmt) {
50647 + panic("Unable to allocate grsecurity alert log format buffer");
50648 + return;
50649 + }
50650 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
50651 + if (!gr_audit_log_fmt) {
50652 + panic("Unable to allocate grsecurity audit log format buffer");
50653 + return;
50654 + }
50655 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50656 + if (!gr_alert_log_buf) {
50657 + panic("Unable to allocate grsecurity alert log buffer");
50658 + return;
50659 + }
50660 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50661 + if (!gr_audit_log_buf) {
50662 + panic("Unable to allocate grsecurity audit log buffer");
50663 + return;
50664 + }
50665 +
50666 + /* allocate memory for authentication structure */
50667 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
50668 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
50669 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
50670 +
50671 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
50672 + panic("Unable to allocate grsecurity authentication structure");
50673 + return;
50674 + }
50675 +
50676 +
50677 +#ifdef CONFIG_GRKERNSEC_IO
50678 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
50679 + grsec_disable_privio = 1;
50680 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50681 + grsec_disable_privio = 1;
50682 +#else
50683 + grsec_disable_privio = 0;
50684 +#endif
50685 +#endif
50686 +
50687 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
50688 + /* for backward compatibility, tpe_invert always defaults to on if
50689 + enabled in the kernel
50690 + */
50691 + grsec_enable_tpe_invert = 1;
50692 +#endif
50693 +
50694 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50695 +#ifndef CONFIG_GRKERNSEC_SYSCTL
50696 + grsec_lock = 1;
50697 +#endif
50698 +
50699 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
50700 + grsec_enable_audit_textrel = 1;
50701 +#endif
50702 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
50703 + grsec_enable_log_rwxmaps = 1;
50704 +#endif
50705 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
50706 + grsec_enable_group = 1;
50707 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
50708 +#endif
50709 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
50710 + grsec_enable_chdir = 1;
50711 +#endif
50712 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50713 + grsec_enable_harden_ptrace = 1;
50714 +#endif
50715 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
50716 + grsec_enable_mount = 1;
50717 +#endif
50718 +#ifdef CONFIG_GRKERNSEC_LINK
50719 + grsec_enable_link = 1;
50720 +#endif
50721 +#ifdef CONFIG_GRKERNSEC_DMESG
50722 + grsec_enable_dmesg = 1;
50723 +#endif
50724 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
50725 + grsec_enable_blackhole = 1;
50726 + grsec_lastack_retries = 4;
50727 +#endif
50728 +#ifdef CONFIG_GRKERNSEC_FIFO
50729 + grsec_enable_fifo = 1;
50730 +#endif
50731 +#ifdef CONFIG_GRKERNSEC_EXECVE
50732 + grsec_enable_execve = 1;
50733 +#endif
50734 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50735 + grsec_enable_execlog = 1;
50736 +#endif
50737 +#ifdef CONFIG_GRKERNSEC_SIGNAL
50738 + grsec_enable_signal = 1;
50739 +#endif
50740 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
50741 + grsec_enable_forkfail = 1;
50742 +#endif
50743 +#ifdef CONFIG_GRKERNSEC_TIME
50744 + grsec_enable_time = 1;
50745 +#endif
50746 +#ifdef CONFIG_GRKERNSEC_RESLOG
50747 + grsec_resource_logging = 1;
50748 +#endif
50749 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50750 + grsec_enable_chroot_findtask = 1;
50751 +#endif
50752 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
50753 + grsec_enable_chroot_unix = 1;
50754 +#endif
50755 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50756 + grsec_enable_chroot_mount = 1;
50757 +#endif
50758 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
50759 + grsec_enable_chroot_fchdir = 1;
50760 +#endif
50761 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50762 + grsec_enable_chroot_shmat = 1;
50763 +#endif
50764 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
50765 + grsec_enable_audit_ptrace = 1;
50766 +#endif
50767 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50768 + grsec_enable_chroot_double = 1;
50769 +#endif
50770 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50771 + grsec_enable_chroot_pivot = 1;
50772 +#endif
50773 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50774 + grsec_enable_chroot_chdir = 1;
50775 +#endif
50776 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50777 + grsec_enable_chroot_chmod = 1;
50778 +#endif
50779 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50780 + grsec_enable_chroot_mknod = 1;
50781 +#endif
50782 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50783 + grsec_enable_chroot_nice = 1;
50784 +#endif
50785 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
50786 + grsec_enable_chroot_execlog = 1;
50787 +#endif
50788 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50789 + grsec_enable_chroot_caps = 1;
50790 +#endif
50791 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50792 + grsec_enable_chroot_sysctl = 1;
50793 +#endif
50794 +#ifdef CONFIG_GRKERNSEC_TPE
50795 + grsec_enable_tpe = 1;
50796 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
50797 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
50798 + grsec_enable_tpe_all = 1;
50799 +#endif
50800 +#endif
50801 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
50802 + grsec_enable_socket_all = 1;
50803 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
50804 +#endif
50805 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
50806 + grsec_enable_socket_client = 1;
50807 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
50808 +#endif
50809 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
50810 + grsec_enable_socket_server = 1;
50811 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
50812 +#endif
50813 +#endif
50814 +
50815 + return;
50816 +}
50817 diff -urNp linux-2.6.32.42/grsecurity/grsec_link.c linux-2.6.32.42/grsecurity/grsec_link.c
50818 --- linux-2.6.32.42/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
50819 +++ linux-2.6.32.42/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
50820 @@ -0,0 +1,43 @@
50821 +#include <linux/kernel.h>
50822 +#include <linux/sched.h>
50823 +#include <linux/fs.h>
50824 +#include <linux/file.h>
50825 +#include <linux/grinternal.h>
50826 +
50827 +int
50828 +gr_handle_follow_link(const struct inode *parent,
50829 + const struct inode *inode,
50830 + const struct dentry *dentry, const struct vfsmount *mnt)
50831 +{
50832 +#ifdef CONFIG_GRKERNSEC_LINK
50833 + const struct cred *cred = current_cred();
50834 +
50835 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
50836 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
50837 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
50838 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
50839 + return -EACCES;
50840 + }
50841 +#endif
50842 + return 0;
50843 +}
50844 +
50845 +int
50846 +gr_handle_hardlink(const struct dentry *dentry,
50847 + const struct vfsmount *mnt,
50848 + struct inode *inode, const int mode, const char *to)
50849 +{
50850 +#ifdef CONFIG_GRKERNSEC_LINK
50851 + const struct cred *cred = current_cred();
50852 +
50853 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
50854 + (!S_ISREG(mode) || (mode & S_ISUID) ||
50855 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
50856 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
50857 + !capable(CAP_FOWNER) && cred->uid) {
50858 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
50859 + return -EPERM;
50860 + }
50861 +#endif
50862 + return 0;
50863 +}
50864 diff -urNp linux-2.6.32.42/grsecurity/grsec_log.c linux-2.6.32.42/grsecurity/grsec_log.c
50865 --- linux-2.6.32.42/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
50866 +++ linux-2.6.32.42/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
50867 @@ -0,0 +1,310 @@
50868 +#include <linux/kernel.h>
50869 +#include <linux/sched.h>
50870 +#include <linux/file.h>
50871 +#include <linux/tty.h>
50872 +#include <linux/fs.h>
50873 +#include <linux/grinternal.h>
50874 +
50875 +#ifdef CONFIG_TREE_PREEMPT_RCU
50876 +#define DISABLE_PREEMPT() preempt_disable()
50877 +#define ENABLE_PREEMPT() preempt_enable()
50878 +#else
50879 +#define DISABLE_PREEMPT()
50880 +#define ENABLE_PREEMPT()
50881 +#endif
50882 +
50883 +#define BEGIN_LOCKS(x) \
50884 + DISABLE_PREEMPT(); \
50885 + rcu_read_lock(); \
50886 + read_lock(&tasklist_lock); \
50887 + read_lock(&grsec_exec_file_lock); \
50888 + if (x != GR_DO_AUDIT) \
50889 + spin_lock(&grsec_alert_lock); \
50890 + else \
50891 + spin_lock(&grsec_audit_lock)
50892 +
50893 +#define END_LOCKS(x) \
50894 + if (x != GR_DO_AUDIT) \
50895 + spin_unlock(&grsec_alert_lock); \
50896 + else \
50897 + spin_unlock(&grsec_audit_lock); \
50898 + read_unlock(&grsec_exec_file_lock); \
50899 + read_unlock(&tasklist_lock); \
50900 + rcu_read_unlock(); \
50901 + ENABLE_PREEMPT(); \
50902 + if (x == GR_DONT_AUDIT) \
50903 + gr_handle_alertkill(current)
50904 +
50905 +enum {
50906 + FLOODING,
50907 + NO_FLOODING
50908 +};
50909 +
50910 +extern char *gr_alert_log_fmt;
50911 +extern char *gr_audit_log_fmt;
50912 +extern char *gr_alert_log_buf;
50913 +extern char *gr_audit_log_buf;
50914 +
50915 +static int gr_log_start(int audit)
50916 +{
50917 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
50918 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
50919 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50920 +
50921 + if (audit == GR_DO_AUDIT)
50922 + goto set_fmt;
50923 +
50924 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
50925 + grsec_alert_wtime = jiffies;
50926 + grsec_alert_fyet = 0;
50927 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
50928 + grsec_alert_fyet++;
50929 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
50930 + grsec_alert_wtime = jiffies;
50931 + grsec_alert_fyet++;
50932 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
50933 + return FLOODING;
50934 + } else return FLOODING;
50935 +
50936 +set_fmt:
50937 + memset(buf, 0, PAGE_SIZE);
50938 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
50939 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
50940 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
50941 + } else if (current->signal->curr_ip) {
50942 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
50943 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
50944 + } else if (gr_acl_is_enabled()) {
50945 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
50946 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
50947 + } else {
50948 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
50949 + strcpy(buf, fmt);
50950 + }
50951 +
50952 + return NO_FLOODING;
50953 +}
50954 +
50955 +static void gr_log_middle(int audit, const char *msg, va_list ap)
50956 + __attribute__ ((format (printf, 2, 0)));
50957 +
50958 +static void gr_log_middle(int audit, const char *msg, va_list ap)
50959 +{
50960 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50961 + unsigned int len = strlen(buf);
50962 +
50963 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
50964 +
50965 + return;
50966 +}
50967 +
50968 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
50969 + __attribute__ ((format (printf, 2, 3)));
50970 +
50971 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
50972 +{
50973 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50974 + unsigned int len = strlen(buf);
50975 + va_list ap;
50976 +
50977 + va_start(ap, msg);
50978 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
50979 + va_end(ap);
50980 +
50981 + return;
50982 +}
50983 +
50984 +static void gr_log_end(int audit)
50985 +{
50986 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50987 + unsigned int len = strlen(buf);
50988 +
50989 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
50990 + printk("%s\n", buf);
50991 +
50992 + return;
50993 +}
50994 +
50995 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
50996 +{
50997 + int logtype;
50998 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
50999 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
51000 + void *voidptr = NULL;
51001 + int num1 = 0, num2 = 0;
51002 + unsigned long ulong1 = 0, ulong2 = 0;
51003 + struct dentry *dentry = NULL;
51004 + struct vfsmount *mnt = NULL;
51005 + struct file *file = NULL;
51006 + struct task_struct *task = NULL;
51007 + const struct cred *cred, *pcred;
51008 + va_list ap;
51009 +
51010 + BEGIN_LOCKS(audit);
51011 + logtype = gr_log_start(audit);
51012 + if (logtype == FLOODING) {
51013 + END_LOCKS(audit);
51014 + return;
51015 + }
51016 + va_start(ap, argtypes);
51017 + switch (argtypes) {
51018 + case GR_TTYSNIFF:
51019 + task = va_arg(ap, struct task_struct *);
51020 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
51021 + break;
51022 + case GR_SYSCTL_HIDDEN:
51023 + str1 = va_arg(ap, char *);
51024 + gr_log_middle_varargs(audit, msg, result, str1);
51025 + break;
51026 + case GR_RBAC:
51027 + dentry = va_arg(ap, struct dentry *);
51028 + mnt = va_arg(ap, struct vfsmount *);
51029 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
51030 + break;
51031 + case GR_RBAC_STR:
51032 + dentry = va_arg(ap, struct dentry *);
51033 + mnt = va_arg(ap, struct vfsmount *);
51034 + str1 = va_arg(ap, char *);
51035 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
51036 + break;
51037 + case GR_STR_RBAC:
51038 + str1 = va_arg(ap, char *);
51039 + dentry = va_arg(ap, struct dentry *);
51040 + mnt = va_arg(ap, struct vfsmount *);
51041 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
51042 + break;
51043 + case GR_RBAC_MODE2:
51044 + dentry = va_arg(ap, struct dentry *);
51045 + mnt = va_arg(ap, struct vfsmount *);
51046 + str1 = va_arg(ap, char *);
51047 + str2 = va_arg(ap, char *);
51048 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
51049 + break;
51050 + case GR_RBAC_MODE3:
51051 + dentry = va_arg(ap, struct dentry *);
51052 + mnt = va_arg(ap, struct vfsmount *);
51053 + str1 = va_arg(ap, char *);
51054 + str2 = va_arg(ap, char *);
51055 + str3 = va_arg(ap, char *);
51056 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
51057 + break;
51058 + case GR_FILENAME:
51059 + dentry = va_arg(ap, struct dentry *);
51060 + mnt = va_arg(ap, struct vfsmount *);
51061 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
51062 + break;
51063 + case GR_STR_FILENAME:
51064 + str1 = va_arg(ap, char *);
51065 + dentry = va_arg(ap, struct dentry *);
51066 + mnt = va_arg(ap, struct vfsmount *);
51067 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
51068 + break;
51069 + case GR_FILENAME_STR:
51070 + dentry = va_arg(ap, struct dentry *);
51071 + mnt = va_arg(ap, struct vfsmount *);
51072 + str1 = va_arg(ap, char *);
51073 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
51074 + break;
51075 + case GR_FILENAME_TWO_INT:
51076 + dentry = va_arg(ap, struct dentry *);
51077 + mnt = va_arg(ap, struct vfsmount *);
51078 + num1 = va_arg(ap, int);
51079 + num2 = va_arg(ap, int);
51080 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
51081 + break;
51082 + case GR_FILENAME_TWO_INT_STR:
51083 + dentry = va_arg(ap, struct dentry *);
51084 + mnt = va_arg(ap, struct vfsmount *);
51085 + num1 = va_arg(ap, int);
51086 + num2 = va_arg(ap, int);
51087 + str1 = va_arg(ap, char *);
51088 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
51089 + break;
51090 + case GR_TEXTREL:
51091 + file = va_arg(ap, struct file *);
51092 + ulong1 = va_arg(ap, unsigned long);
51093 + ulong2 = va_arg(ap, unsigned long);
51094 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
51095 + break;
51096 + case GR_PTRACE:
51097 + task = va_arg(ap, struct task_struct *);
51098 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
51099 + break;
51100 + case GR_RESOURCE:
51101 + task = va_arg(ap, struct task_struct *);
51102 + cred = __task_cred(task);
51103 + pcred = __task_cred(task->real_parent);
51104 + ulong1 = va_arg(ap, unsigned long);
51105 + str1 = va_arg(ap, char *);
51106 + ulong2 = va_arg(ap, unsigned long);
51107 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51108 + break;
51109 + case GR_CAP:
51110 + task = va_arg(ap, struct task_struct *);
51111 + cred = __task_cred(task);
51112 + pcred = __task_cred(task->real_parent);
51113 + str1 = va_arg(ap, char *);
51114 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51115 + break;
51116 + case GR_SIG:
51117 + str1 = va_arg(ap, char *);
51118 + voidptr = va_arg(ap, void *);
51119 + gr_log_middle_varargs(audit, msg, str1, voidptr);
51120 + break;
51121 + case GR_SIG2:
51122 + task = va_arg(ap, struct task_struct *);
51123 + cred = __task_cred(task);
51124 + pcred = __task_cred(task->real_parent);
51125 + num1 = va_arg(ap, int);
51126 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51127 + break;
51128 + case GR_CRASH1:
51129 + task = va_arg(ap, struct task_struct *);
51130 + cred = __task_cred(task);
51131 + pcred = __task_cred(task->real_parent);
51132 + ulong1 = va_arg(ap, unsigned long);
51133 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
51134 + break;
51135 + case GR_CRASH2:
51136 + task = va_arg(ap, struct task_struct *);
51137 + cred = __task_cred(task);
51138 + pcred = __task_cred(task->real_parent);
51139 + ulong1 = va_arg(ap, unsigned long);
51140 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
51141 + break;
51142 + case GR_RWXMAP:
51143 + file = va_arg(ap, struct file *);
51144 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
51145 + break;
51146 + case GR_PSACCT:
51147 + {
51148 + unsigned int wday, cday;
51149 + __u8 whr, chr;
51150 + __u8 wmin, cmin;
51151 + __u8 wsec, csec;
51152 + char cur_tty[64] = { 0 };
51153 + char parent_tty[64] = { 0 };
51154 +
51155 + task = va_arg(ap, struct task_struct *);
51156 + wday = va_arg(ap, unsigned int);
51157 + cday = va_arg(ap, unsigned int);
51158 + whr = va_arg(ap, int);
51159 + chr = va_arg(ap, int);
51160 + wmin = va_arg(ap, int);
51161 + cmin = va_arg(ap, int);
51162 + wsec = va_arg(ap, int);
51163 + csec = va_arg(ap, int);
51164 + ulong1 = va_arg(ap, unsigned long);
51165 + cred = __task_cred(task);
51166 + pcred = __task_cred(task->real_parent);
51167 +
51168 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51169 + }
51170 + break;
51171 + default:
51172 + gr_log_middle(audit, msg, ap);
51173 + }
51174 + va_end(ap);
51175 + gr_log_end(audit);
51176 + END_LOCKS(audit);
51177 +}
51178 diff -urNp linux-2.6.32.42/grsecurity/grsec_mem.c linux-2.6.32.42/grsecurity/grsec_mem.c
51179 --- linux-2.6.32.42/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
51180 +++ linux-2.6.32.42/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
51181 @@ -0,0 +1,33 @@
51182 +#include <linux/kernel.h>
51183 +#include <linux/sched.h>
51184 +#include <linux/mm.h>
51185 +#include <linux/mman.h>
51186 +#include <linux/grinternal.h>
51187 +
51188 +void
51189 +gr_handle_ioperm(void)
51190 +{
51191 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
51192 + return;
51193 +}
51194 +
51195 +void
51196 +gr_handle_iopl(void)
51197 +{
51198 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
51199 + return;
51200 +}
51201 +
51202 +void
51203 +gr_handle_mem_readwrite(u64 from, u64 to)
51204 +{
51205 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
51206 + return;
51207 +}
51208 +
51209 +void
51210 +gr_handle_vm86(void)
51211 +{
51212 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
51213 + return;
51214 +}
51215 diff -urNp linux-2.6.32.42/grsecurity/grsec_mount.c linux-2.6.32.42/grsecurity/grsec_mount.c
51216 --- linux-2.6.32.42/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
51217 +++ linux-2.6.32.42/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
51218 @@ -0,0 +1,62 @@
51219 +#include <linux/kernel.h>
51220 +#include <linux/sched.h>
51221 +#include <linux/mount.h>
51222 +#include <linux/grsecurity.h>
51223 +#include <linux/grinternal.h>
51224 +
51225 +void
51226 +gr_log_remount(const char *devname, const int retval)
51227 +{
51228 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51229 + if (grsec_enable_mount && (retval >= 0))
51230 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
51231 +#endif
51232 + return;
51233 +}
51234 +
51235 +void
51236 +gr_log_unmount(const char *devname, const int retval)
51237 +{
51238 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51239 + if (grsec_enable_mount && (retval >= 0))
51240 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
51241 +#endif
51242 + return;
51243 +}
51244 +
51245 +void
51246 +gr_log_mount(const char *from, const char *to, const int retval)
51247 +{
51248 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51249 + if (grsec_enable_mount && (retval >= 0))
51250 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
51251 +#endif
51252 + return;
51253 +}
51254 +
51255 +int
51256 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
51257 +{
51258 +#ifdef CONFIG_GRKERNSEC_ROFS
51259 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
51260 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
51261 + return -EPERM;
51262 + } else
51263 + return 0;
51264 +#endif
51265 + return 0;
51266 +}
51267 +
51268 +int
51269 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
51270 +{
51271 +#ifdef CONFIG_GRKERNSEC_ROFS
51272 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
51273 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
51274 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
51275 + return -EPERM;
51276 + } else
51277 + return 0;
51278 +#endif
51279 + return 0;
51280 +}
51281 diff -urNp linux-2.6.32.42/grsecurity/grsec_pax.c linux-2.6.32.42/grsecurity/grsec_pax.c
51282 --- linux-2.6.32.42/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
51283 +++ linux-2.6.32.42/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
51284 @@ -0,0 +1,36 @@
51285 +#include <linux/kernel.h>
51286 +#include <linux/sched.h>
51287 +#include <linux/mm.h>
51288 +#include <linux/file.h>
51289 +#include <linux/grinternal.h>
51290 +#include <linux/grsecurity.h>
51291 +
51292 +void
51293 +gr_log_textrel(struct vm_area_struct * vma)
51294 +{
51295 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51296 + if (grsec_enable_audit_textrel)
51297 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
51298 +#endif
51299 + return;
51300 +}
51301 +
51302 +void
51303 +gr_log_rwxmmap(struct file *file)
51304 +{
51305 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51306 + if (grsec_enable_log_rwxmaps)
51307 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
51308 +#endif
51309 + return;
51310 +}
51311 +
51312 +void
51313 +gr_log_rwxmprotect(struct file *file)
51314 +{
51315 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51316 + if (grsec_enable_log_rwxmaps)
51317 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
51318 +#endif
51319 + return;
51320 +}
51321 diff -urNp linux-2.6.32.42/grsecurity/grsec_ptrace.c linux-2.6.32.42/grsecurity/grsec_ptrace.c
51322 --- linux-2.6.32.42/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
51323 +++ linux-2.6.32.42/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
51324 @@ -0,0 +1,14 @@
51325 +#include <linux/kernel.h>
51326 +#include <linux/sched.h>
51327 +#include <linux/grinternal.h>
51328 +#include <linux/grsecurity.h>
51329 +
51330 +void
51331 +gr_audit_ptrace(struct task_struct *task)
51332 +{
51333 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51334 + if (grsec_enable_audit_ptrace)
51335 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
51336 +#endif
51337 + return;
51338 +}
51339 diff -urNp linux-2.6.32.42/grsecurity/grsec_sig.c linux-2.6.32.42/grsecurity/grsec_sig.c
51340 --- linux-2.6.32.42/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
51341 +++ linux-2.6.32.42/grsecurity/grsec_sig.c 2011-05-17 17:30:04.000000000 -0400
51342 @@ -0,0 +1,202 @@
51343 +#include <linux/kernel.h>
51344 +#include <linux/sched.h>
51345 +#include <linux/delay.h>
51346 +#include <linux/grsecurity.h>
51347 +#include <linux/grinternal.h>
51348 +#include <linux/hardirq.h>
51349 +
51350 +char *signames[] = {
51351 + [SIGSEGV] = "Segmentation fault",
51352 + [SIGILL] = "Illegal instruction",
51353 + [SIGABRT] = "Abort",
51354 + [SIGBUS] = "Invalid alignment/Bus error"
51355 +};
51356 +
51357 +void
51358 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
51359 +{
51360 +#ifdef CONFIG_GRKERNSEC_SIGNAL
51361 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
51362 + (sig == SIGABRT) || (sig == SIGBUS))) {
51363 + if (t->pid == current->pid) {
51364 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
51365 + } else {
51366 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
51367 + }
51368 + }
51369 +#endif
51370 + return;
51371 +}
51372 +
51373 +int
51374 +gr_handle_signal(const struct task_struct *p, const int sig)
51375 +{
51376 +#ifdef CONFIG_GRKERNSEC
51377 + if (current->pid > 1 && gr_check_protected_task(p)) {
51378 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
51379 + return -EPERM;
51380 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
51381 + return -EPERM;
51382 + }
51383 +#endif
51384 + return 0;
51385 +}
51386 +
51387 +#ifdef CONFIG_GRKERNSEC
51388 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
51389 +
51390 +int gr_fake_force_sig(int sig, struct task_struct *t)
51391 +{
51392 + unsigned long int flags;
51393 + int ret, blocked, ignored;
51394 + struct k_sigaction *action;
51395 +
51396 + spin_lock_irqsave(&t->sighand->siglock, flags);
51397 + action = &t->sighand->action[sig-1];
51398 + ignored = action->sa.sa_handler == SIG_IGN;
51399 + blocked = sigismember(&t->blocked, sig);
51400 + if (blocked || ignored) {
51401 + action->sa.sa_handler = SIG_DFL;
51402 + if (blocked) {
51403 + sigdelset(&t->blocked, sig);
51404 + recalc_sigpending_and_wake(t);
51405 + }
51406 + }
51407 + if (action->sa.sa_handler == SIG_DFL)
51408 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
51409 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
51410 +
51411 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
51412 +
51413 + return ret;
51414 +}
51415 +#endif
51416 +
51417 +#ifdef CONFIG_GRKERNSEC_BRUTE
51418 +#define GR_USER_BAN_TIME (15 * 60)
51419 +
51420 +static int __get_dumpable(unsigned long mm_flags)
51421 +{
51422 + int ret;
51423 +
51424 + ret = mm_flags & MMF_DUMPABLE_MASK;
51425 + return (ret >= 2) ? 2 : ret;
51426 +}
51427 +#endif
51428 +
51429 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
51430 +{
51431 +#ifdef CONFIG_GRKERNSEC_BRUTE
51432 + uid_t uid = 0;
51433 +
51434 + rcu_read_lock();
51435 + read_lock(&tasklist_lock);
51436 + read_lock(&grsec_exec_file_lock);
51437 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
51438 + p->real_parent->brute = 1;
51439 + else {
51440 + const struct cred *cred = __task_cred(p), *cred2;
51441 + struct task_struct *tsk, *tsk2;
51442 +
51443 + if (!__get_dumpable(mm_flags) && cred->uid) {
51444 + struct user_struct *user;
51445 +
51446 + uid = cred->uid;
51447 +
49448 + /* the reference taken by find_user() below is dropped via free_uid() in __gr_process_user_ban() once the ban expires */
51449 + user = find_user(uid);
51450 + if (user == NULL)
51451 + goto unlock;
51452 + user->banned = 1;
51453 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
51454 + if (user->ban_expires == ~0UL)
51455 + user->ban_expires--;
51456 +
51457 + do_each_thread(tsk2, tsk) {
51458 + cred2 = __task_cred(tsk);
51459 + if (tsk != p && cred2->uid == uid)
51460 + gr_fake_force_sig(SIGKILL, tsk);
51461 + } while_each_thread(tsk2, tsk);
51462 + }
51463 + }
51464 +unlock:
51465 + read_unlock(&grsec_exec_file_lock);
51466 + read_unlock(&tasklist_lock);
51467 + rcu_read_unlock();
51468 +
51469 + if (uid)
51470 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
51471 +#endif
51472 + return;
51473 +}
51474 +
51475 +void gr_handle_brute_check(void)
51476 +{
51477 +#ifdef CONFIG_GRKERNSEC_BRUTE
51478 + if (current->brute)
51479 + msleep(30 * 1000);
51480 +#endif
51481 + return;
51482 +}
51483 +
51484 +void gr_handle_kernel_exploit(void)
51485 +{
51486 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
51487 + const struct cred *cred;
51488 + struct task_struct *tsk, *tsk2;
51489 + struct user_struct *user;
51490 + uid_t uid;
51491 +
51492 + if (in_irq() || in_serving_softirq() || in_nmi())
51493 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
51494 +
51495 + uid = current_uid();
51496 +
51497 + if (uid == 0)
51498 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
51499 + else {
51500 + /* kill all the processes of this user, hold a reference
51501 + to their creds struct, and prevent them from creating
51502 + another process until system reset
51503 + */
51504 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
51505 + /* we intentionally leak this ref */
51506 + user = get_uid(current->cred->user);
51507 + if (user) {
51508 + user->banned = 1;
51509 + user->ban_expires = ~0UL;
51510 + }
51511 +
51512 + read_lock(&tasklist_lock);
51513 + do_each_thread(tsk2, tsk) {
51514 + cred = __task_cred(tsk);
51515 + if (cred->uid == uid)
51516 + gr_fake_force_sig(SIGKILL, tsk);
51517 + } while_each_thread(tsk2, tsk);
51518 + read_unlock(&tasklist_lock);
51519 + }
51520 +#endif
51521 +}
51522 +
51523 +int __gr_process_user_ban(struct user_struct *user)
51524 +{
51525 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51526 + if (unlikely(user->banned)) {
51527 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
51528 + user->banned = 0;
51529 + user->ban_expires = 0;
51530 + free_uid(user);
51531 + } else
51532 + return -EPERM;
51533 + }
51534 +#endif
51535 + return 0;
51536 +}
51537 +
51538 +int gr_process_user_ban(void)
51539 +{
51540 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51541 + return __gr_process_user_ban(current->cred->user);
51542 +#endif
51543 + return 0;
51544 +}
51545 diff -urNp linux-2.6.32.42/grsecurity/grsec_sock.c linux-2.6.32.42/grsecurity/grsec_sock.c
51546 --- linux-2.6.32.42/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
51547 +++ linux-2.6.32.42/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
51548 @@ -0,0 +1,275 @@
51549 +#include <linux/kernel.h>
51550 +#include <linux/module.h>
51551 +#include <linux/sched.h>
51552 +#include <linux/file.h>
51553 +#include <linux/net.h>
51554 +#include <linux/in.h>
51555 +#include <linux/ip.h>
51556 +#include <net/sock.h>
51557 +#include <net/inet_sock.h>
51558 +#include <linux/grsecurity.h>
51559 +#include <linux/grinternal.h>
51560 +#include <linux/gracl.h>
51561 +
51562 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
51563 +EXPORT_SYMBOL(gr_cap_rtnetlink);
51564 +
51565 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
51566 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
51567 +
51568 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
51569 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
51570 +
51571 +#ifdef CONFIG_UNIX_MODULE
51572 +EXPORT_SYMBOL(gr_acl_handle_unix);
51573 +EXPORT_SYMBOL(gr_acl_handle_mknod);
51574 +EXPORT_SYMBOL(gr_handle_chroot_unix);
51575 +EXPORT_SYMBOL(gr_handle_create);
51576 +#endif
51577 +
51578 +#ifdef CONFIG_GRKERNSEC
51579 +#define gr_conn_table_size 32749
51580 +struct conn_table_entry {
51581 + struct conn_table_entry *next;
51582 + struct signal_struct *sig;
51583 +};
51584 +
51585 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
51586 +DEFINE_SPINLOCK(gr_conn_table_lock);
51587 +
51588 +extern const char * gr_socktype_to_name(unsigned char type);
51589 +extern const char * gr_proto_to_name(unsigned char proto);
51590 +extern const char * gr_sockfamily_to_name(unsigned char family);
51591 +
51592 +static __inline__ int
51593 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
51594 +{
51595 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
51596 +}
51597 +
51598 +static __inline__ int
51599 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
51600 + __u16 sport, __u16 dport)
51601 +{
51602 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
51603 + sig->gr_sport == sport && sig->gr_dport == dport))
51604 + return 1;
51605 + else
51606 + return 0;
51607 +}
51608 +
51609 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
51610 +{
51611 + struct conn_table_entry **match;
51612 + unsigned int index;
51613 +
51614 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51615 + sig->gr_sport, sig->gr_dport,
51616 + gr_conn_table_size);
51617 +
51618 + newent->sig = sig;
51619 +
51620 + match = &gr_conn_table[index];
51621 + newent->next = *match;
51622 + *match = newent;
51623 +
51624 + return;
51625 +}
51626 +
51627 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
51628 +{
51629 + struct conn_table_entry *match, *last = NULL;
51630 + unsigned int index;
51631 +
51632 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51633 + sig->gr_sport, sig->gr_dport,
51634 + gr_conn_table_size);
51635 +
51636 + match = gr_conn_table[index];
51637 + while (match && !conn_match(match->sig,
51638 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
51639 + sig->gr_dport)) {
51640 + last = match;
51641 + match = match->next;
51642 + }
51643 +
51644 + if (match) {
51645 + if (last)
51646 + last->next = match->next;
51647 + else
51648 + gr_conn_table[index] = match->next;
51649 + kfree(match);
51650 + }
51651 +
51652 + return;
51653 +}
51654 +
51655 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
51656 + __u16 sport, __u16 dport)
51657 +{
51658 + struct conn_table_entry *match;
51659 + unsigned int index;
51660 +
51661 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
51662 +
51663 + match = gr_conn_table[index];
51664 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
51665 + match = match->next;
51666 +
51667 + if (match)
51668 + return match->sig;
51669 + else
51670 + return NULL;
51671 +}
51672 +
51673 +#endif
51674 +
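
conn_hash() above folds the (saddr, daddr, sport, dport) tuple into one of 32749 buckets, and colliding entries are chained through conn_table_entry; this table is what later lets gr_attach_curr_ip() recover the task that originated an accepted connection. A self-contained sketch of the same hash-and-chain scheme, with hypothetical names and malloc standing in for the kernel allocator:

    /* Standalone sketch of the 4-tuple hash table used above (hypothetical names). */
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define TABLE_SIZE 32749   /* same prime bucket count as gr_conn_table_size */

    struct entry {
        uint32_t saddr, daddr;
        uint16_t sport, dport;
        struct entry *next;
    };

    static struct entry *table[TABLE_SIZE];

    /* Same mixing as conn_hash(): fold the tuple, reduce modulo the table size. */
    static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
                                  uint16_t sport, uint16_t dport)
    {
        return (daddr + saddr + ((uint32_t)sport << 8) +
                ((uint32_t)dport << 16)) % TABLE_SIZE;
    }

    static void insert(uint32_t saddr, uint32_t daddr, uint16_t sport, uint16_t dport)
    {
        unsigned int i = conn_hash(saddr, daddr, sport, dport);
        struct entry *e = malloc(sizeof(*e));
        if (!e)
            return;
        *e = (struct entry){ saddr, daddr, sport, dport, table[i] };  /* push at head */
        table[i] = e;
    }

    static struct entry *lookup(uint32_t saddr, uint32_t daddr,
                                uint16_t sport, uint16_t dport)
    {
        struct entry *e = table[conn_hash(saddr, daddr, sport, dport)];
        while (e && !(e->saddr == saddr && e->daddr == daddr &&
                      e->sport == sport && e->dport == dport))
            e = e->next;
        return e;
    }

    int main(void)
    {
        insert(0x0a000001, 0x0a000002, 1234, 80);
        printf("%s\n", lookup(0x0a000001, 0x0a000002, 1234, 80) ? "found" : "missing");
        return 0;
    }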
51675 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
51676 +{
51677 +#ifdef CONFIG_GRKERNSEC
51678 + struct signal_struct *sig = task->signal;
51679 + struct conn_table_entry *newent;
51680 +
51681 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
51682 + if (newent == NULL)
51683 + return;
51684 + /* no bh lock needed since we are called with bh disabled */
51685 + spin_lock(&gr_conn_table_lock);
51686 + gr_del_task_from_ip_table_nolock(sig);
51687 + sig->gr_saddr = inet->rcv_saddr;
51688 + sig->gr_daddr = inet->daddr;
51689 + sig->gr_sport = inet->sport;
51690 + sig->gr_dport = inet->dport;
51691 + gr_add_to_task_ip_table_nolock(sig, newent);
51692 + spin_unlock(&gr_conn_table_lock);
51693 +#endif
51694 + return;
51695 +}
51696 +
51697 +void gr_del_task_from_ip_table(struct task_struct *task)
51698 +{
51699 +#ifdef CONFIG_GRKERNSEC
51700 + spin_lock_bh(&gr_conn_table_lock);
51701 + gr_del_task_from_ip_table_nolock(task->signal);
51702 + spin_unlock_bh(&gr_conn_table_lock);
51703 +#endif
51704 + return;
51705 +}
51706 +
51707 +void
51708 +gr_attach_curr_ip(const struct sock *sk)
51709 +{
51710 +#ifdef CONFIG_GRKERNSEC
51711 + struct signal_struct *p, *set;
51712 + const struct inet_sock *inet = inet_sk(sk);
51713 +
51714 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
51715 + return;
51716 +
51717 + set = current->signal;
51718 +
51719 + spin_lock_bh(&gr_conn_table_lock);
51720 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
51721 + inet->dport, inet->sport);
51722 + if (unlikely(p != NULL)) {
51723 + set->curr_ip = p->curr_ip;
51724 + set->used_accept = 1;
51725 + gr_del_task_from_ip_table_nolock(p);
51726 + spin_unlock_bh(&gr_conn_table_lock);
51727 + return;
51728 + }
51729 + spin_unlock_bh(&gr_conn_table_lock);
51730 +
51731 + set->curr_ip = inet->daddr;
51732 + set->used_accept = 1;
51733 +#endif
51734 + return;
51735 +}
51736 +
51737 +int
51738 +gr_handle_sock_all(const int family, const int type, const int protocol)
51739 +{
51740 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51741 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
51742 + (family != AF_UNIX)) {
51743 + if (family == AF_INET)
51744 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
51745 + else
51746 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
51747 + return -EACCES;
51748 + }
51749 +#endif
51750 + return 0;
51751 +}
51752 +
51753 +int
51754 +gr_handle_sock_server(const struct sockaddr *sck)
51755 +{
51756 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51757 + if (grsec_enable_socket_server &&
51758 + in_group_p(grsec_socket_server_gid) &&
51759 + sck && (sck->sa_family != AF_UNIX) &&
51760 + (sck->sa_family != AF_LOCAL)) {
51761 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51762 + return -EACCES;
51763 + }
51764 +#endif
51765 + return 0;
51766 +}
51767 +
51768 +int
51769 +gr_handle_sock_server_other(const struct sock *sck)
51770 +{
51771 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51772 + if (grsec_enable_socket_server &&
51773 + in_group_p(grsec_socket_server_gid) &&
51774 + sck && (sck->sk_family != AF_UNIX) &&
51775 + (sck->sk_family != AF_LOCAL)) {
51776 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51777 + return -EACCES;
51778 + }
51779 +#endif
51780 + return 0;
51781 +}
51782 +
51783 +int
51784 +gr_handle_sock_client(const struct sockaddr *sck)
51785 +{
51786 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51787 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
51788 + sck && (sck->sa_family != AF_UNIX) &&
51789 + (sck->sa_family != AF_LOCAL)) {
51790 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
51791 + return -EACCES;
51792 + }
51793 +#endif
51794 + return 0;
51795 +}
51796 +
51797 +kernel_cap_t
51798 +gr_cap_rtnetlink(struct sock *sock)
51799 +{
51800 +#ifdef CONFIG_GRKERNSEC
51801 + if (!gr_acl_is_enabled())
51802 + return current_cap();
51803 + else if (sock->sk_protocol == NETLINK_ISCSI &&
51804 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
51805 + gr_is_capable(CAP_SYS_ADMIN))
51806 + return current_cap();
51807 + else if (sock->sk_protocol == NETLINK_AUDIT &&
51808 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
51809 + gr_is_capable(CAP_AUDIT_WRITE) &&
51810 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
51811 + gr_is_capable(CAP_AUDIT_CONTROL))
51812 + return current_cap();
51813 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
51814 + ((sock->sk_protocol == NETLINK_ROUTE) ?
51815 + gr_is_capable_nolog(CAP_NET_ADMIN) :
51816 + gr_is_capable(CAP_NET_ADMIN)))
51817 + return current_cap();
51818 + else
51819 + return __cap_empty_set;
51820 +#else
51821 + return current_cap();
51822 +#endif
51823 +}
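
gr_cap_rtnetlink() above keeps the caller's capability set only for netlink protocols it can vouch for: NETLINK_ISCSI wants CAP_SYS_ADMIN, NETLINK_AUDIT wants both audit capabilities, and everything else falls back to CAP_NET_ADMIN. A rough userspace sketch of that decision order (hypothetical names; the RBAC re-checks and the log/nolog distinction are omitted):

    /* Sketch of the rtnetlink capability gate: returns 1 if the socket keeps
     * its capabilities, 0 if they would be replaced with the empty set. */
    #include <stdio.h>

    enum nl_proto { NL_ROUTE, NL_ISCSI, NL_AUDIT, NL_OTHER };

    struct caps { int sys_admin, audit_write, audit_control, net_admin; };

    static int rtnetlink_allowed(enum nl_proto proto, struct caps c)
    {
        if (proto == NL_ISCSI && c.sys_admin)
            return 1;                      /* NETLINK_ISCSI needs CAP_SYS_ADMIN */
        if (proto == NL_AUDIT && c.audit_write && c.audit_control)
            return 1;                      /* NETLINK_AUDIT needs both audit caps */
        if (c.net_admin)
            return 1;                      /* everything else falls back to CAP_NET_ADMIN */
        return 0;
    }

    int main(void)
    {
        struct caps net_only   = { .net_admin = 1 };
        struct caps audit_half = { .audit_write = 1 };
        printf("%d\n", rtnetlink_allowed(NL_ROUTE, net_only));   /* 1 */
        printf("%d\n", rtnetlink_allowed(NL_AUDIT, audit_half)); /* 0: missing audit_control */
        return 0;
    }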
51824 diff -urNp linux-2.6.32.42/grsecurity/grsec_sysctl.c linux-2.6.32.42/grsecurity/grsec_sysctl.c
51825 --- linux-2.6.32.42/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
51826 +++ linux-2.6.32.42/grsecurity/grsec_sysctl.c 2011-04-17 15:56:46.000000000 -0400
51827 @@ -0,0 +1,479 @@
51828 +#include <linux/kernel.h>
51829 +#include <linux/sched.h>
51830 +#include <linux/sysctl.h>
51831 +#include <linux/grsecurity.h>
51832 +#include <linux/grinternal.h>
51833 +
51834 +int
51835 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
51836 +{
51837 +#ifdef CONFIG_GRKERNSEC_SYSCTL
51838 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
51839 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
51840 + return -EACCES;
51841 + }
51842 +#endif
51843 + return 0;
51844 +}
51845 +
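
gr_handle_sysctl_mod() above refuses writes to any sysctl under the grsecurity directory once grsec_lock is set, which is what makes the grsec_lock entry near the end of this table a one-way kill switch for runtime reconfiguration. A small userspace sketch of the gate (hypothetical names):

    /* Userspace sketch of the grsec_lock gate above. */
    #include <stdio.h>
    #include <string.h>

    #define MAY_WRITE 0x2   /* same bit value as the kernel's MAY_WRITE */

    static int grsec_lock;  /* mirrors the grsec_lock sysctl defined below */

    static int sysctl_mod_allowed(const char *dirname, int op)
    {
        if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE))
            return -1;      /* -EACCES in the kernel */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", sysctl_mod_allowed("grsecurity", MAY_WRITE));  /* 0: not locked yet */
        grsec_lock = 1;
        printf("%d\n", sysctl_mod_allowed("grsecurity", MAY_WRITE));  /* -1: locked */
        return 0;
    }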
51846 +#ifdef CONFIG_GRKERNSEC_ROFS
51847 +static int __maybe_unused one = 1;
51848 +#endif
51849 +
51850 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
51851 +ctl_table grsecurity_table[] = {
51852 +#ifdef CONFIG_GRKERNSEC_SYSCTL
51853 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
51854 +#ifdef CONFIG_GRKERNSEC_IO
51855 + {
51856 + .ctl_name = CTL_UNNUMBERED,
51857 + .procname = "disable_priv_io",
51858 + .data = &grsec_disable_privio,
51859 + .maxlen = sizeof(int),
51860 + .mode = 0600,
51861 + .proc_handler = &proc_dointvec,
51862 + },
51863 +#endif
51864 +#endif
51865 +#ifdef CONFIG_GRKERNSEC_LINK
51866 + {
51867 + .ctl_name = CTL_UNNUMBERED,
51868 + .procname = "linking_restrictions",
51869 + .data = &grsec_enable_link,
51870 + .maxlen = sizeof(int),
51871 + .mode = 0600,
51872 + .proc_handler = &proc_dointvec,
51873 + },
51874 +#endif
51875 +#ifdef CONFIG_GRKERNSEC_FIFO
51876 + {
51877 + .ctl_name = CTL_UNNUMBERED,
51878 + .procname = "fifo_restrictions",
51879 + .data = &grsec_enable_fifo,
51880 + .maxlen = sizeof(int),
51881 + .mode = 0600,
51882 + .proc_handler = &proc_dointvec,
51883 + },
51884 +#endif
51885 +#ifdef CONFIG_GRKERNSEC_EXECVE
51886 + {
51887 + .ctl_name = CTL_UNNUMBERED,
51888 + .procname = "execve_limiting",
51889 + .data = &grsec_enable_execve,
51890 + .maxlen = sizeof(int),
51891 + .mode = 0600,
51892 + .proc_handler = &proc_dointvec,
51893 + },
51894 +#endif
51895 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
51896 + {
51897 + .ctl_name = CTL_UNNUMBERED,
51898 + .procname = "ip_blackhole",
51899 + .data = &grsec_enable_blackhole,
51900 + .maxlen = sizeof(int),
51901 + .mode = 0600,
51902 + .proc_handler = &proc_dointvec,
51903 + },
51904 + {
51905 + .ctl_name = CTL_UNNUMBERED,
51906 + .procname = "lastack_retries",
51907 + .data = &grsec_lastack_retries,
51908 + .maxlen = sizeof(int),
51909 + .mode = 0600,
51910 + .proc_handler = &proc_dointvec,
51911 + },
51912 +#endif
51913 +#ifdef CONFIG_GRKERNSEC_EXECLOG
51914 + {
51915 + .ctl_name = CTL_UNNUMBERED,
51916 + .procname = "exec_logging",
51917 + .data = &grsec_enable_execlog,
51918 + .maxlen = sizeof(int),
51919 + .mode = 0600,
51920 + .proc_handler = &proc_dointvec,
51921 + },
51922 +#endif
51923 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51924 + {
51925 + .ctl_name = CTL_UNNUMBERED,
51926 + .procname = "rwxmap_logging",
51927 + .data = &grsec_enable_log_rwxmaps,
51928 + .maxlen = sizeof(int),
51929 + .mode = 0600,
51930 + .proc_handler = &proc_dointvec,
51931 + },
51932 +#endif
51933 +#ifdef CONFIG_GRKERNSEC_SIGNAL
51934 + {
51935 + .ctl_name = CTL_UNNUMBERED,
51936 + .procname = "signal_logging",
51937 + .data = &grsec_enable_signal,
51938 + .maxlen = sizeof(int),
51939 + .mode = 0600,
51940 + .proc_handler = &proc_dointvec,
51941 + },
51942 +#endif
51943 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
51944 + {
51945 + .ctl_name = CTL_UNNUMBERED,
51946 + .procname = "forkfail_logging",
51947 + .data = &grsec_enable_forkfail,
51948 + .maxlen = sizeof(int),
51949 + .mode = 0600,
51950 + .proc_handler = &proc_dointvec,
51951 + },
51952 +#endif
51953 +#ifdef CONFIG_GRKERNSEC_TIME
51954 + {
51955 + .ctl_name = CTL_UNNUMBERED,
51956 + .procname = "timechange_logging",
51957 + .data = &grsec_enable_time,
51958 + .maxlen = sizeof(int),
51959 + .mode = 0600,
51960 + .proc_handler = &proc_dointvec,
51961 + },
51962 +#endif
51963 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
51964 + {
51965 + .ctl_name = CTL_UNNUMBERED,
51966 + .procname = "chroot_deny_shmat",
51967 + .data = &grsec_enable_chroot_shmat,
51968 + .maxlen = sizeof(int),
51969 + .mode = 0600,
51970 + .proc_handler = &proc_dointvec,
51971 + },
51972 +#endif
51973 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
51974 + {
51975 + .ctl_name = CTL_UNNUMBERED,
51976 + .procname = "chroot_deny_unix",
51977 + .data = &grsec_enable_chroot_unix,
51978 + .maxlen = sizeof(int),
51979 + .mode = 0600,
51980 + .proc_handler = &proc_dointvec,
51981 + },
51982 +#endif
51983 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
51984 + {
51985 + .ctl_name = CTL_UNNUMBERED,
51986 + .procname = "chroot_deny_mount",
51987 + .data = &grsec_enable_chroot_mount,
51988 + .maxlen = sizeof(int),
51989 + .mode = 0600,
51990 + .proc_handler = &proc_dointvec,
51991 + },
51992 +#endif
51993 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
51994 + {
51995 + .ctl_name = CTL_UNNUMBERED,
51996 + .procname = "chroot_deny_fchdir",
51997 + .data = &grsec_enable_chroot_fchdir,
51998 + .maxlen = sizeof(int),
51999 + .mode = 0600,
52000 + .proc_handler = &proc_dointvec,
52001 + },
52002 +#endif
52003 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52004 + {
52005 + .ctl_name = CTL_UNNUMBERED,
52006 + .procname = "chroot_deny_chroot",
52007 + .data = &grsec_enable_chroot_double,
52008 + .maxlen = sizeof(int),
52009 + .mode = 0600,
52010 + .proc_handler = &proc_dointvec,
52011 + },
52012 +#endif
52013 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52014 + {
52015 + .ctl_name = CTL_UNNUMBERED,
52016 + .procname = "chroot_deny_pivot",
52017 + .data = &grsec_enable_chroot_pivot,
52018 + .maxlen = sizeof(int),
52019 + .mode = 0600,
52020 + .proc_handler = &proc_dointvec,
52021 + },
52022 +#endif
52023 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52024 + {
52025 + .ctl_name = CTL_UNNUMBERED,
52026 + .procname = "chroot_enforce_chdir",
52027 + .data = &grsec_enable_chroot_chdir,
52028 + .maxlen = sizeof(int),
52029 + .mode = 0600,
52030 + .proc_handler = &proc_dointvec,
52031 + },
52032 +#endif
52033 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52034 + {
52035 + .ctl_name = CTL_UNNUMBERED,
52036 + .procname = "chroot_deny_chmod",
52037 + .data = &grsec_enable_chroot_chmod,
52038 + .maxlen = sizeof(int),
52039 + .mode = 0600,
52040 + .proc_handler = &proc_dointvec,
52041 + },
52042 +#endif
52043 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52044 + {
52045 + .ctl_name = CTL_UNNUMBERED,
52046 + .procname = "chroot_deny_mknod",
52047 + .data = &grsec_enable_chroot_mknod,
52048 + .maxlen = sizeof(int),
52049 + .mode = 0600,
52050 + .proc_handler = &proc_dointvec,
52051 + },
52052 +#endif
52053 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52054 + {
52055 + .ctl_name = CTL_UNNUMBERED,
52056 + .procname = "chroot_restrict_nice",
52057 + .data = &grsec_enable_chroot_nice,
52058 + .maxlen = sizeof(int),
52059 + .mode = 0600,
52060 + .proc_handler = &proc_dointvec,
52061 + },
52062 +#endif
52063 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52064 + {
52065 + .ctl_name = CTL_UNNUMBERED,
52066 + .procname = "chroot_execlog",
52067 + .data = &grsec_enable_chroot_execlog,
52068 + .maxlen = sizeof(int),
52069 + .mode = 0600,
52070 + .proc_handler = &proc_dointvec,
52071 + },
52072 +#endif
52073 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52074 + {
52075 + .ctl_name = CTL_UNNUMBERED,
52076 + .procname = "chroot_caps",
52077 + .data = &grsec_enable_chroot_caps,
52078 + .maxlen = sizeof(int),
52079 + .mode = 0600,
52080 + .proc_handler = &proc_dointvec,
52081 + },
52082 +#endif
52083 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52084 + {
52085 + .ctl_name = CTL_UNNUMBERED,
52086 + .procname = "chroot_deny_sysctl",
52087 + .data = &grsec_enable_chroot_sysctl,
52088 + .maxlen = sizeof(int),
52089 + .mode = 0600,
52090 + .proc_handler = &proc_dointvec,
52091 + },
52092 +#endif
52093 +#ifdef CONFIG_GRKERNSEC_TPE
52094 + {
52095 + .ctl_name = CTL_UNNUMBERED,
52096 + .procname = "tpe",
52097 + .data = &grsec_enable_tpe,
52098 + .maxlen = sizeof(int),
52099 + .mode = 0600,
52100 + .proc_handler = &proc_dointvec,
52101 + },
52102 + {
52103 + .ctl_name = CTL_UNNUMBERED,
52104 + .procname = "tpe_gid",
52105 + .data = &grsec_tpe_gid,
52106 + .maxlen = sizeof(int),
52107 + .mode = 0600,
52108 + .proc_handler = &proc_dointvec,
52109 + },
52110 +#endif
52111 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52112 + {
52113 + .ctl_name = CTL_UNNUMBERED,
52114 + .procname = "tpe_invert",
52115 + .data = &grsec_enable_tpe_invert,
52116 + .maxlen = sizeof(int),
52117 + .mode = 0600,
52118 + .proc_handler = &proc_dointvec,
52119 + },
52120 +#endif
52121 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52122 + {
52123 + .ctl_name = CTL_UNNUMBERED,
52124 + .procname = "tpe_restrict_all",
52125 + .data = &grsec_enable_tpe_all,
52126 + .maxlen = sizeof(int),
52127 + .mode = 0600,
52128 + .proc_handler = &proc_dointvec,
52129 + },
52130 +#endif
52131 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52132 + {
52133 + .ctl_name = CTL_UNNUMBERED,
52134 + .procname = "socket_all",
52135 + .data = &grsec_enable_socket_all,
52136 + .maxlen = sizeof(int),
52137 + .mode = 0600,
52138 + .proc_handler = &proc_dointvec,
52139 + },
52140 + {
52141 + .ctl_name = CTL_UNNUMBERED,
52142 + .procname = "socket_all_gid",
52143 + .data = &grsec_socket_all_gid,
52144 + .maxlen = sizeof(int),
52145 + .mode = 0600,
52146 + .proc_handler = &proc_dointvec,
52147 + },
52148 +#endif
52149 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52150 + {
52151 + .ctl_name = CTL_UNNUMBERED,
52152 + .procname = "socket_client",
52153 + .data = &grsec_enable_socket_client,
52154 + .maxlen = sizeof(int),
52155 + .mode = 0600,
52156 + .proc_handler = &proc_dointvec,
52157 + },
52158 + {
52159 + .ctl_name = CTL_UNNUMBERED,
52160 + .procname = "socket_client_gid",
52161 + .data = &grsec_socket_client_gid,
52162 + .maxlen = sizeof(int),
52163 + .mode = 0600,
52164 + .proc_handler = &proc_dointvec,
52165 + },
52166 +#endif
52167 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52168 + {
52169 + .ctl_name = CTL_UNNUMBERED,
52170 + .procname = "socket_server",
52171 + .data = &grsec_enable_socket_server,
52172 + .maxlen = sizeof(int),
52173 + .mode = 0600,
52174 + .proc_handler = &proc_dointvec,
52175 + },
52176 + {
52177 + .ctl_name = CTL_UNNUMBERED,
52178 + .procname = "socket_server_gid",
52179 + .data = &grsec_socket_server_gid,
52180 + .maxlen = sizeof(int),
52181 + .mode = 0600,
52182 + .proc_handler = &proc_dointvec,
52183 + },
52184 +#endif
52185 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
52186 + {
52187 + .ctl_name = CTL_UNNUMBERED,
52188 + .procname = "audit_group",
52189 + .data = &grsec_enable_group,
52190 + .maxlen = sizeof(int),
52191 + .mode = 0600,
52192 + .proc_handler = &proc_dointvec,
52193 + },
52194 + {
52195 + .ctl_name = CTL_UNNUMBERED,
52196 + .procname = "audit_gid",
52197 + .data = &grsec_audit_gid,
52198 + .maxlen = sizeof(int),
52199 + .mode = 0600,
52200 + .proc_handler = &proc_dointvec,
52201 + },
52202 +#endif
52203 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52204 + {
52205 + .ctl_name = CTL_UNNUMBERED,
52206 + .procname = "audit_chdir",
52207 + .data = &grsec_enable_chdir,
52208 + .maxlen = sizeof(int),
52209 + .mode = 0600,
52210 + .proc_handler = &proc_dointvec,
52211 + },
52212 +#endif
52213 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52214 + {
52215 + .ctl_name = CTL_UNNUMBERED,
52216 + .procname = "audit_mount",
52217 + .data = &grsec_enable_mount,
52218 + .maxlen = sizeof(int),
52219 + .mode = 0600,
52220 + .proc_handler = &proc_dointvec,
52221 + },
52222 +#endif
52223 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52224 + {
52225 + .ctl_name = CTL_UNNUMBERED,
52226 + .procname = "audit_textrel",
52227 + .data = &grsec_enable_audit_textrel,
52228 + .maxlen = sizeof(int),
52229 + .mode = 0600,
52230 + .proc_handler = &proc_dointvec,
52231 + },
52232 +#endif
52233 +#ifdef CONFIG_GRKERNSEC_DMESG
52234 + {
52235 + .ctl_name = CTL_UNNUMBERED,
52236 + .procname = "dmesg",
52237 + .data = &grsec_enable_dmesg,
52238 + .maxlen = sizeof(int),
52239 + .mode = 0600,
52240 + .proc_handler = &proc_dointvec,
52241 + },
52242 +#endif
52243 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52244 + {
52245 + .ctl_name = CTL_UNNUMBERED,
52246 + .procname = "chroot_findtask",
52247 + .data = &grsec_enable_chroot_findtask,
52248 + .maxlen = sizeof(int),
52249 + .mode = 0600,
52250 + .proc_handler = &proc_dointvec,
52251 + },
52252 +#endif
52253 +#ifdef CONFIG_GRKERNSEC_RESLOG
52254 + {
52255 + .ctl_name = CTL_UNNUMBERED,
52256 + .procname = "resource_logging",
52257 + .data = &grsec_resource_logging,
52258 + .maxlen = sizeof(int),
52259 + .mode = 0600,
52260 + .proc_handler = &proc_dointvec,
52261 + },
52262 +#endif
52263 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52264 + {
52265 + .ctl_name = CTL_UNNUMBERED,
52266 + .procname = "audit_ptrace",
52267 + .data = &grsec_enable_audit_ptrace,
52268 + .maxlen = sizeof(int),
52269 + .mode = 0600,
52270 + .proc_handler = &proc_dointvec,
52271 + },
52272 +#endif
52273 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52274 + {
52275 + .ctl_name = CTL_UNNUMBERED,
52276 + .procname = "harden_ptrace",
52277 + .data = &grsec_enable_harden_ptrace,
52278 + .maxlen = sizeof(int),
52279 + .mode = 0600,
52280 + .proc_handler = &proc_dointvec,
52281 + },
52282 +#endif
52283 + {
52284 + .ctl_name = CTL_UNNUMBERED,
52285 + .procname = "grsec_lock",
52286 + .data = &grsec_lock,
52287 + .maxlen = sizeof(int),
52288 + .mode = 0600,
52289 + .proc_handler = &proc_dointvec,
52290 + },
52291 +#endif
52292 +#ifdef CONFIG_GRKERNSEC_ROFS
52293 + {
52294 + .ctl_name = CTL_UNNUMBERED,
52295 + .procname = "romount_protect",
52296 + .data = &grsec_enable_rofs,
52297 + .maxlen = sizeof(int),
52298 + .mode = 0600,
52299 + .proc_handler = &proc_dointvec_minmax,
52300 + .extra1 = &one,
52301 + .extra2 = &one,
52302 + },
52303 +#endif
52304 + { .ctl_name = 0 }
52305 +};
52306 +#endif
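
The romount_protect entry above uses proc_dointvec_minmax with both bounds pointing at the value 1, so the only value a write can store is 1; together with the ROFS help text's note that the protection cannot be turned off once enabled, this acts as a one-way switch. A userspace sketch of that behavior (hypothetical names; out-of-range handling is simplified to "not stored"):

    /* Sketch of the ROFS one-way switch implemented via min == max == 1. */
    #include <stdio.h>

    static int romount_protect;                 /* mirrors grsec_enable_rofs */

    /* Mirrors the minmax handler: out-of-range values are not stored. */
    static int write_sysctl(int *val, int newval, int min, int max)
    {
        if (newval < min || newval > max)
            return -1;
        *val = newval;
        return 0;
    }

    int main(void)
    {
        printf("set 1 -> %d, value %d\n",
               write_sysctl(&romount_protect, 1, 1, 1), romount_protect);  /* 0, 1 */
        printf("set 0 -> %d, value %d\n",
               write_sysctl(&romount_protect, 0, 1, 1), romount_protect);  /* -1, still 1 */
        return 0;
    }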
52307 diff -urNp linux-2.6.32.42/grsecurity/grsec_time.c linux-2.6.32.42/grsecurity/grsec_time.c
52308 --- linux-2.6.32.42/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
52309 +++ linux-2.6.32.42/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
52310 @@ -0,0 +1,16 @@
52311 +#include <linux/kernel.h>
52312 +#include <linux/sched.h>
52313 +#include <linux/grinternal.h>
52314 +#include <linux/module.h>
52315 +
52316 +void
52317 +gr_log_timechange(void)
52318 +{
52319 +#ifdef CONFIG_GRKERNSEC_TIME
52320 + if (grsec_enable_time)
52321 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
52322 +#endif
52323 + return;
52324 +}
52325 +
52326 +EXPORT_SYMBOL(gr_log_timechange);
52327 diff -urNp linux-2.6.32.42/grsecurity/grsec_tpe.c linux-2.6.32.42/grsecurity/grsec_tpe.c
52328 --- linux-2.6.32.42/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
52329 +++ linux-2.6.32.42/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
52330 @@ -0,0 +1,39 @@
52331 +#include <linux/kernel.h>
52332 +#include <linux/sched.h>
52333 +#include <linux/file.h>
52334 +#include <linux/fs.h>
52335 +#include <linux/grinternal.h>
52336 +
52337 +extern int gr_acl_tpe_check(void);
52338 +
52339 +int
52340 +gr_tpe_allow(const struct file *file)
52341 +{
52342 +#ifdef CONFIG_GRKERNSEC
52343 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
52344 + const struct cred *cred = current_cred();
52345 +
52346 + if (cred->uid && ((grsec_enable_tpe &&
52347 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52348 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
52349 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
52350 +#else
52351 + in_group_p(grsec_tpe_gid)
52352 +#endif
52353 + ) || gr_acl_tpe_check()) &&
52354 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
52355 + (inode->i_mode & S_IWOTH))))) {
52356 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52357 + return 0;
52358 + }
52359 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52360 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
52361 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
52362 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
52363 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52364 + return 0;
52365 + }
52366 +#endif
52367 +#endif
52368 + return 1;
52369 +}
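
gr_tpe_allow() above denies exec for untrusted users when the binary's parent directory is not root-owned or is group/world writable, and TPE_ALL extends the check to directories not owned by the user themselves. A simplified userspace sketch of the decision (hypothetical struct; the RBAC hook and the tpe_invert group handling are folded into a single "applies to this user" flag):

    /* Sketch of the trusted-path-execution decision: returns 1 if the exec
     * would be allowed, 0 if it would be denied and logged. */
    #include <stdio.h>

    struct dir_info {
        unsigned int owner_uid;        /* uid owning the binary's directory */
        int group_or_world_writable;
    };

    static int tpe_allow(unsigned int uid, int tpe_applies_to_user,
                         int tpe_all, struct dir_info d)
    {
        /* basic TPE: untrusted users may only exec from root-owned,
         * non-(group/world)-writable directories */
        if (uid && tpe_applies_to_user &&
            (d.owner_uid != 0 || d.group_or_world_writable))
            return 0;
        /* TPE_ALL: every non-root user is limited to directories owned by
         * root or by themselves that are not group/world writable */
        if (uid && tpe_all &&
            ((d.owner_uid && d.owner_uid != uid) || d.group_or_world_writable))
            return 0;
        return 1;
    }

    int main(void)
    {
        struct dir_info tmp = { .owner_uid = 0, .group_or_world_writable = 1 }; /* /tmp-like */
        struct dir_info bin = { .owner_uid = 0, .group_or_world_writable = 0 }; /* /usr/bin-like */
        printf("%d\n", tpe_allow(1000, 1, 0, tmp)); /* 0: denied */
        printf("%d\n", tpe_allow(1000, 1, 0, bin)); /* 1: allowed */
        return 0;
    }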
52370 diff -urNp linux-2.6.32.42/grsecurity/grsum.c linux-2.6.32.42/grsecurity/grsum.c
52371 --- linux-2.6.32.42/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
52372 +++ linux-2.6.32.42/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
52373 @@ -0,0 +1,61 @@
52374 +#include <linux/err.h>
52375 +#include <linux/kernel.h>
52376 +#include <linux/sched.h>
52377 +#include <linux/mm.h>
52378 +#include <linux/scatterlist.h>
52379 +#include <linux/crypto.h>
52380 +#include <linux/gracl.h>
52381 +
52382 +
52383 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
52384 +#error "crypto and sha256 must be built into the kernel"
52385 +#endif
52386 +
52387 +int
52388 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
52389 +{
52390 + char *p;
52391 + struct crypto_hash *tfm;
52392 + struct hash_desc desc;
52393 + struct scatterlist sg;
52394 + unsigned char temp_sum[GR_SHA_LEN];
52395 + volatile int retval = 0;
52396 + volatile int dummy = 0;
52397 + unsigned int i;
52398 +
52399 + sg_init_table(&sg, 1);
52400 +
52401 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
52402 + if (IS_ERR(tfm)) {
52403 + /* should never happen, since sha256 should be built in */
52404 + return 1;
52405 + }
52406 +
52407 + desc.tfm = tfm;
52408 + desc.flags = 0;
52409 +
52410 + crypto_hash_init(&desc);
52411 +
52412 + p = salt;
52413 + sg_set_buf(&sg, p, GR_SALT_LEN);
52414 + crypto_hash_update(&desc, &sg, sg.length);
52415 +
52416 + p = entry->pw;
52417 + sg_set_buf(&sg, p, strlen(p));
52418 +
52419 + crypto_hash_update(&desc, &sg, sg.length);
52420 +
52421 + crypto_hash_final(&desc, temp_sum);
52422 +
52423 + memset(entry->pw, 0, GR_PW_LEN);
52424 +
52425 + for (i = 0; i < GR_SHA_LEN; i++)
52426 + if (sum[i] != temp_sum[i])
52427 + retval = 1;
52428 + else
52429 + dummy = 1; // waste a cycle
52430 +
52431 + crypto_free_hash(tfm);
52432 +
52433 + return retval;
52434 +}
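
chkpw() above compares the computed digest against the stored one without breaking out of the loop on the first mismatch (the dummy assignment keeps both branches doing comparable work), so the comparison time does not reveal how many leading bytes of a guessed password hash were correct. A userspace sketch of the same idea (hypothetical names):

    /* Sketch of a comparison loop with no early exit, as in chkpw() above. */
    #include <stdio.h>

    #define HASH_LEN 32   /* SHA-256 digest length, as in GR_SHA_LEN */

    static int compare_digests(const unsigned char *a, const unsigned char *b)
    {
        volatile int mismatch = 0;
        volatile int dummy = 0;
        int i;

        for (i = 0; i < HASH_LEN; i++)
            if (a[i] != b[i])
                mismatch = 1;
            else
                dummy = 1;       /* keep both branches busy */
        (void)dummy;
        return mismatch;         /* 0 on match, 1 on mismatch */
    }

    int main(void)
    {
        unsigned char x[HASH_LEN] = { 0 }, y[HASH_LEN] = { 0 };
        printf("%d\n", compare_digests(x, y));  /* 0 */
        y[HASH_LEN - 1] = 0xff;
        printf("%d\n", compare_digests(x, y));  /* 1 */
        return 0;
    }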
52435 diff -urNp linux-2.6.32.42/grsecurity/Kconfig linux-2.6.32.42/grsecurity/Kconfig
52436 --- linux-2.6.32.42/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
52437 +++ linux-2.6.32.42/grsecurity/Kconfig 2011-06-13 21:34:09.000000000 -0400
52438 @@ -0,0 +1,1045 @@
52439 +#
52440 +# grsecurity configuration
52441 +#
52442 +
52443 +menu "Grsecurity"
52444 +
52445 +config GRKERNSEC
52446 + bool "Grsecurity"
52447 + select CRYPTO
52448 + select CRYPTO_SHA256
52449 + help
52450 + If you say Y here, you will be able to configure many features
52451 + that will enhance the security of your system. It is highly
52452 + recommended that you say Y here and read through the help
52453 + for each option so that you fully understand the features and
52454 + can evaluate their usefulness for your machine.
52455 +
52456 +choice
52457 + prompt "Security Level"
52458 + depends on GRKERNSEC
52459 + default GRKERNSEC_CUSTOM
52460 +
52461 +config GRKERNSEC_LOW
52462 + bool "Low"
52463 + select GRKERNSEC_LINK
52464 + select GRKERNSEC_FIFO
52465 + select GRKERNSEC_EXECVE
52466 + select GRKERNSEC_RANDNET
52467 + select GRKERNSEC_DMESG
52468 + select GRKERNSEC_CHROOT
52469 + select GRKERNSEC_CHROOT_CHDIR
52470 +
52471 + help
52472 + If you choose this option, several of the grsecurity options will
52473 + be enabled that will give you greater protection against a number
52474 + of attacks, while assuring that none of your software will have any
52475 + conflicts with the additional security measures. If you run a lot
52476 + of unusual software, or you are having problems with the higher
52477 + security levels, you should say Y here. With this option, the
52478 + following features are enabled:
52479 +
52480 + - Linking restrictions
52481 + - FIFO restrictions
52482 + - Enforcing RLIMIT_NPROC on execve
52483 + - Restricted dmesg
52484 + - Enforced chdir("/") on chroot
52485 + - Runtime module disabling
52486 +
52487 +config GRKERNSEC_MEDIUM
52488 + bool "Medium"
52489 + select PAX
52490 + select PAX_EI_PAX
52491 + select PAX_PT_PAX_FLAGS
52492 + select PAX_HAVE_ACL_FLAGS
52493 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52494 + select GRKERNSEC_CHROOT
52495 + select GRKERNSEC_CHROOT_SYSCTL
52496 + select GRKERNSEC_LINK
52497 + select GRKERNSEC_FIFO
52498 + select GRKERNSEC_EXECVE
52499 + select GRKERNSEC_DMESG
52500 + select GRKERNSEC_RANDNET
52501 + select GRKERNSEC_FORKFAIL
52502 + select GRKERNSEC_TIME
52503 + select GRKERNSEC_SIGNAL
52504 + select GRKERNSEC_CHROOT
52505 + select GRKERNSEC_CHROOT_UNIX
52506 + select GRKERNSEC_CHROOT_MOUNT
52507 + select GRKERNSEC_CHROOT_PIVOT
52508 + select GRKERNSEC_CHROOT_DOUBLE
52509 + select GRKERNSEC_CHROOT_CHDIR
52510 + select GRKERNSEC_CHROOT_MKNOD
52511 + select GRKERNSEC_PROC
52512 + select GRKERNSEC_PROC_USERGROUP
52513 + select PAX_RANDUSTACK
52514 + select PAX_ASLR
52515 + select PAX_RANDMMAP
52516 + select PAX_REFCOUNT if (X86 || SPARC64)
52517 + select PAX_USERCOPY if ((X86 || SPARC32 || SPARC64 || PPC) && (SLAB || SLUB || SLOB))
52518 +
52519 + help
52520 + If you say Y here, several features in addition to those included
52521 + in the low additional security level will be enabled. These
52522 + features provide even more security to your system, though in rare
52523 + cases they may be incompatible with very old or poorly written
52524 + software. If you enable this option, make sure that your auth
52525 + service (identd) is running as gid 1001. With this option,
52526 + the following features (in addition to those provided in the
52527 + low additional security level) will be enabled:
52528 +
52529 + - Failed fork logging
52530 + - Time change logging
52531 + - Signal logging
52532 + - Deny mounts in chroot
52533 + - Deny double chrooting
52534 + - Deny sysctl writes in chroot
52535 + - Deny mknod in chroot
52536 + - Deny access to abstract AF_UNIX sockets out of chroot
52537 + - Deny pivot_root in chroot
52538 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
52539 + - /proc restrictions with special GID set to 10 (usually wheel)
52540 + - Address Space Layout Randomization (ASLR)
52541 + - Prevent exploitation of most refcount overflows
52542 + - Bounds checking of copying between the kernel and userland
52543 +
52544 +config GRKERNSEC_HIGH
52545 + bool "High"
52546 + select GRKERNSEC_LINK
52547 + select GRKERNSEC_FIFO
52548 + select GRKERNSEC_EXECVE
52549 + select GRKERNSEC_DMESG
52550 + select GRKERNSEC_FORKFAIL
52551 + select GRKERNSEC_TIME
52552 + select GRKERNSEC_SIGNAL
52553 + select GRKERNSEC_CHROOT
52554 + select GRKERNSEC_CHROOT_SHMAT
52555 + select GRKERNSEC_CHROOT_UNIX
52556 + select GRKERNSEC_CHROOT_MOUNT
52557 + select GRKERNSEC_CHROOT_FCHDIR
52558 + select GRKERNSEC_CHROOT_PIVOT
52559 + select GRKERNSEC_CHROOT_DOUBLE
52560 + select GRKERNSEC_CHROOT_CHDIR
52561 + select GRKERNSEC_CHROOT_MKNOD
52562 + select GRKERNSEC_CHROOT_CAPS
52563 + select GRKERNSEC_CHROOT_SYSCTL
52564 + select GRKERNSEC_CHROOT_FINDTASK
52565 + select GRKERNSEC_SYSFS_RESTRICT
52566 + select GRKERNSEC_PROC
52567 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52568 + select GRKERNSEC_HIDESYM
52569 + select GRKERNSEC_BRUTE
52570 + select GRKERNSEC_PROC_USERGROUP
52571 + select GRKERNSEC_KMEM
52572 + select GRKERNSEC_RESLOG
52573 + select GRKERNSEC_RANDNET
52574 + select GRKERNSEC_PROC_ADD
52575 + select GRKERNSEC_CHROOT_CHMOD
52576 + select GRKERNSEC_CHROOT_NICE
52577 + select GRKERNSEC_AUDIT_MOUNT
52578 + select GRKERNSEC_MODHARDEN if (MODULES)
52579 + select GRKERNSEC_HARDEN_PTRACE
52580 + select GRKERNSEC_VM86 if (X86_32)
52581 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC32 || SPARC64)
52582 + select PAX
52583 + select PAX_RANDUSTACK
52584 + select PAX_ASLR
52585 + select PAX_RANDMMAP
52586 + select PAX_NOEXEC
52587 + select PAX_MPROTECT
52588 + select PAX_EI_PAX
52589 + select PAX_PT_PAX_FLAGS
52590 + select PAX_HAVE_ACL_FLAGS
52591 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
52592 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
52593 + select PAX_RANDKSTACK if (X86_TSC && X86)
52594 + select PAX_SEGMEXEC if (X86_32)
52595 + select PAX_PAGEEXEC
52596 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC32 || SPARC64)
52597 + select PAX_EMUTRAMP if (PARISC)
52598 + select PAX_EMUSIGRT if (PARISC)
52599 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
52600 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
52601 + select PAX_REFCOUNT if (X86 || SPARC64)
52602 + select PAX_USERCOPY if ((X86 || PPC || SPARC32 || SPARC64) && (SLAB || SLUB || SLOB))
52603 + help
52604 + If you say Y here, many of the features of grsecurity will be
52605 + enabled, which will protect you against many kinds of attacks
52606 + against your system. The heightened security comes at a cost
52607 + of an increased chance of incompatibilities with rare software
52608 + on your machine. Since this security level enables PaX, you should
52609 + view <http://pax.grsecurity.net> and read about the PaX
52610 + project. While you are there, download chpax and run it on
52611 + binaries that cause problems with PaX. Also remember that
52612 + since the /proc restrictions are enabled, you must run your
52613 + identd as gid 1001. This security level enables the following
52614 + features in addition to those listed in the low and medium
52615 + security levels:
52616 +
52617 + - Additional /proc restrictions
52618 + - Chmod restrictions in chroot
52619 + - No signals, ptrace, or viewing of processes outside of chroot
52620 + - Capability restrictions in chroot
52621 + - Deny fchdir out of chroot
52622 + - Priority restrictions in chroot
52623 + - Segmentation-based implementation of PaX
52624 + - Mprotect restrictions
52625 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
52626 + - Kernel stack randomization
52627 + - Mount/unmount/remount logging
52628 + - Kernel symbol hiding
52629 + - Prevention of memory exhaustion-based exploits
52630 + - Hardening of module auto-loading
52631 + - Ptrace restrictions
52632 + - Restricted vm86 mode
52633 + - Restricted sysfs/debugfs
52634 + - Active kernel exploit response
52635 +
52636 +config GRKERNSEC_CUSTOM
52637 + bool "Custom"
52638 + help
52639 + If you say Y here, you will be able to configure every grsecurity
52640 + option, which allows you to enable many more features that aren't
52641 + covered in the basic security levels. These additional features
52642 + include TPE, socket restrictions, and the sysctl system for
52643 + grsecurity. It is advised that you read through the help for
52644 + each option to determine its usefulness in your situation.
52645 +
52646 +endchoice
52647 +
52648 +menu "Address Space Protection"
52649 +depends on GRKERNSEC
52650 +
52651 +config GRKERNSEC_KMEM
52652 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
52653 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
52654 + help
52655 + If you say Y here, writing to /dev/kmem and /dev/mem, via mmap or
52656 + otherwise, to modify the running kernel will not be allowed.
52657 + /dev/port will also not be allowed to be opened. If you have module
52658 + support disabled, enabling this will close up four ways that are
52659 + currently used to insert malicious code into the running kernel.
52660 + Even with all these features enabled, we still highly recommend that
52661 + you use the RBAC system, as it is still possible for an attacker to
52662 + modify the running kernel through privileged I/O granted by ioperm/iopl.
52663 + If you are not using XFree86, you may be able to stop this additional
52664 + case by enabling the 'Disable privileged I/O' option. Though nothing
52665 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
52666 + but only to video memory, which is the only writing we allow in this
52667 + case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the
52668 + mapping will not be allowed to be mprotected with PROT_WRITE later.
52669 + It is highly recommended that you say Y here if you meet all the
52670 + conditions above.
52671 +
52672 +config GRKERNSEC_VM86
52673 + bool "Restrict VM86 mode"
52674 + depends on X86_32
52675 +
52676 + help
52677 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
52678 + make use of a special execution mode on 32bit x86 processors called
52679 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
52680 + video cards and will still work with this option enabled. The purpose
52681 + of the option is to prevent exploitation of emulation errors in
52682 + virtualization of vm86 mode like the one discovered in VMware in 2009.
52683 + Nearly all users should be able to enable this option.
52684 +
52685 +config GRKERNSEC_IO
52686 + bool "Disable privileged I/O"
52687 + depends on X86
52688 + select RTC_CLASS
52689 + select RTC_INTF_DEV
52690 + select RTC_DRV_CMOS
52691 +
52692 + help
52693 + If you say Y here, all ioperm and iopl calls will return an error.
52694 + Ioperm and iopl can be used to modify the running kernel.
52695 + Unfortunately, some programs need this access to operate properly,
52696 + the most notable of which are XFree86 and hwclock. hwclock can be
52697 + remedied by having RTC support in the kernel, so real-time
52698 + clock support is enabled if this option is enabled, to ensure
52699 + that hwclock operates correctly. XFree86 still will not
52700 + operate correctly with this option enabled, so DO NOT CHOOSE Y
52701 + IF YOU USE XFree86. If you use XFree86 and you still want to
52702 + protect your kernel against modification, use the RBAC system.
52703 +
52704 +config GRKERNSEC_PROC_MEMMAP
52705 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
52706 + default y if (PAX_NOEXEC || PAX_ASLR)
52707 + depends on PAX_NOEXEC || PAX_ASLR
52708 + help
52709 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
52710 + give no information about the addresses of a task's mappings if
52711 + PaX features that rely on random addresses are enabled on the task.
52712 + If you use PaX it is greatly recommended that you say Y here as it
52713 + closes up a hole that makes the full ASLR useless for suid
52714 + binaries.
52715 +
52716 +config GRKERNSEC_BRUTE
52717 + bool "Deter exploit bruteforcing"
52718 + help
52719 + If you say Y here, attempts to bruteforce exploits against forking
52720 + daemons such as apache or sshd, as well as against suid/sgid binaries
52721 + will be deterred. When a child of a forking daemon is killed by PaX
52722 + or crashes due to an illegal instruction or other suspicious signal,
52723 + the parent process will be delayed 30 seconds upon every subsequent
52724 + fork until the administrator is able to assess the situation and
52725 + restart the daemon.
52726 + In the suid/sgid case, the attempt is logged, the user has all their
52727 + processes terminated, and they are prevented from executing any further
52728 + processes for 15 minutes.
52729 + It is recommended that you also enable signal logging in the auditing
52730 + section so that logs are generated when a process triggers a suspicious
52731 + signal.
52732 +
52733 +config GRKERNSEC_MODHARDEN
52734 + bool "Harden module auto-loading"
52735 + depends on MODULES
52736 + help
52737 + If you say Y here, module auto-loading in response to use of some
52738 + feature implemented by an unloaded module will be restricted to
52739 + root users. Enabling this option helps defend against attacks
52740 + by unprivileged users who abuse the auto-loading behavior to
52741 + cause a vulnerable module to load that is then exploited.
52742 +
52743 + If this option prevents a legitimate use of auto-loading for a
52744 + non-root user, the administrator can execute modprobe manually
52745 + with the exact name of the module mentioned in the alert log.
52746 + Alternatively, the administrator can add the module to the list
52747 + of modules loaded at boot by modifying init scripts.
52748 +
52749 + Modification of init scripts will most likely be needed on
52750 + Ubuntu servers with encrypted home directory support enabled,
52751 + as the first non-root user logging in will cause the ecb(aes),
52752 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
52753 +
52754 +config GRKERNSEC_HIDESYM
52755 + bool "Hide kernel symbols"
52756 + help
52757 + If you say Y here, getting information on loaded modules, and
52758 + displaying all kernel symbols through a syscall will be restricted
52759 + to users with CAP_SYS_MODULE. For software compatibility reasons,
52760 + /proc/kallsyms will be restricted to the root user. The RBAC
52761 + system can hide that entry even from root.
52762 +
52763 + This option also prevents leaking of kernel addresses through
52764 + several /proc entries.
52765 +
52766 + Note that this option is only effective provided the following
52767 + conditions are met:
52768 + 1) The kernel using grsecurity is not precompiled by some distribution
52769 + 2) You have also enabled GRKERNSEC_DMESG
52770 + 3) You are using the RBAC system and hiding other files such as your
52771 + kernel image and System.map. Alternatively, enabling this option
52772 + causes the permissions on /boot, /lib/modules, and the kernel
52773 + source directory to change at compile time to prevent
52774 + reading by non-root users.
52775 + If the above conditions are met, this option will aid in providing a
52776 + useful protection against local kernel exploitation of overflows
52777 + and arbitrary read/write vulnerabilities.
52778 +
52779 +config GRKERNSEC_KERN_LOCKOUT
52780 + bool "Active kernel exploit response"
52781 + depends on X86 || ARM || PPC || SPARC32 || SPARC64
52782 + help
52783 + If you say Y here, when a PaX alert is triggered due to suspicious
52784 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
52785 + or an OOPs occurs due to bad memory accesses, instead of just
52786 + terminating the offending process (and potentially allowing
52787 + a subsequent exploit from the same user), we will take one of two
52788 + actions:
52789 + If the user was root, we will panic the system
52790 + If the user was non-root, we will log the attempt, terminate
52791 + all processes owned by the user, then prevent them from creating
52792 + any new processes until the system is restarted
52793 + This deters repeated kernel exploitation/bruteforcing attempts
52794 + and is useful for later forensics.
52795 +
52796 +endmenu
52797 +menu "Role Based Access Control Options"
52798 +depends on GRKERNSEC
52799 +
52800 +config GRKERNSEC_RBAC_DEBUG
52801 + bool
52802 +
52803 +config GRKERNSEC_NO_RBAC
52804 + bool "Disable RBAC system"
52805 + help
52806 + If you say Y here, the /dev/grsec device will be removed from the kernel,
52807 + preventing the RBAC system from being enabled. You should only say Y
52808 + here if you have no intention of using the RBAC system, so as to prevent
52809 + an attacker with root access from misusing the RBAC system to hide files
52810 + and processes when loadable module support and /dev/[k]mem have been
52811 + locked down.
52812 +
52813 +config GRKERNSEC_ACL_HIDEKERN
52814 + bool "Hide kernel processes"
52815 + help
52816 + If you say Y here, all kernel threads will be hidden to all
52817 + processes but those whose subject has the "view hidden processes"
52818 + flag.
52819 +
52820 +config GRKERNSEC_ACL_MAXTRIES
52821 + int "Maximum tries before password lockout"
52822 + default 3
52823 + help
52824 + This option enforces the maximum number of times a user can attempt
52825 + to authorize themselves with the grsecurity RBAC system before being
52826 + denied the ability to attempt authorization again for a specified time.
52827 + The lower the number, the harder it will be to brute-force a password.
52828 +
52829 +config GRKERNSEC_ACL_TIMEOUT
52830 + int "Time to wait after max password tries, in seconds"
52831 + default 30
52832 + help
52833 + This option specifies the time the user must wait after attempting to
52834 + authorize to the RBAC system with the maximum number of invalid
52835 + passwords. The higher the number, the harder it will be to brute-force
52836 + a password.
52837 +
52838 +endmenu
52839 +menu "Filesystem Protections"
52840 +depends on GRKERNSEC
52841 +
52842 +config GRKERNSEC_PROC
52843 + bool "Proc restrictions"
52844 + help
52845 + If you say Y here, the permissions of the /proc filesystem
52846 + will be altered to enhance system security and privacy. You MUST
52847 + choose either a user only restriction or a user and group restriction.
52848 + Depending upon the option you choose, you can either restrict users to
52849 + see only the processes they themselves run, or choose a group that can
52850 + view all processes and files normally restricted to root if you choose
52851 + the "restrict to user only" option. NOTE: If you're running identd as
52852 + a non-root user, you will have to run it as the group you specify here.
52853 +
52854 +config GRKERNSEC_PROC_USER
52855 + bool "Restrict /proc to user only"
52856 + depends on GRKERNSEC_PROC
52857 + help
52858 + If you say Y here, non-root users will only be able to view their own
52859 + processes, and will be restricted from viewing network-related
52860 + information and kernel symbol and module information.
52861 +
52862 +config GRKERNSEC_PROC_USERGROUP
52863 + bool "Allow special group"
52864 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
52865 + help
52866 + If you say Y here, you will be able to select a group that will be
52867 + able to view all processes and network-related information. If you've
52868 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
52869 + remain hidden. This option is useful if you want to run identd as
52870 + a non-root user.
52871 +
52872 +config GRKERNSEC_PROC_GID
52873 + int "GID for special group"
52874 + depends on GRKERNSEC_PROC_USERGROUP
52875 + default 1001
52876 +
52877 +config GRKERNSEC_PROC_ADD
52878 + bool "Additional restrictions"
52879 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
52880 + help
52881 + If you say Y here, additional restrictions will be placed on
52882 + /proc that keep normal users from viewing device information and
52883 + slabinfo information that could be useful for exploits.
52884 +
52885 +config GRKERNSEC_LINK
52886 + bool "Linking restrictions"
52887 + help
52888 + If you say Y here, /tmp race exploits will be prevented, since users
52889 + will no longer be able to follow symlinks owned by other users in
52890 + world-writable +t directories (e.g. /tmp), unless the owner of the
52891 + symlink is the owner of the directory. Users will also not be
52892 + able to hardlink to files they do not own. If the sysctl option is
52893 + enabled, a sysctl option with name "linking_restrictions" is created.
52894 +
52895 +config GRKERNSEC_FIFO
52896 + bool "FIFO restrictions"
52897 + help
52898 + If you say Y here, users will not be able to write to FIFOs they don't
52899 + own in world-writable +t directories (e.g. /tmp), unless the owner of
52900 + the FIFO is also the owner of the directory it's held in. If the sysctl
52901 + option is enabled, a sysctl option with name "fifo_restrictions" is
52902 + created.
52903 +
52904 +config GRKERNSEC_SYSFS_RESTRICT
52905 + bool "Sysfs/debugfs restriction"
52906 + depends on SYSFS
52907 + help
52908 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
52909 + any filesystem normally mounted under it (e.g. debugfs) will only
52910 + be accessible by root. These filesystems generally provide access
52911 + to hardware and debug information that isn't appropriate for unprivileged
52912 + users of the system. Sysfs and debugfs have also become a large source
52913 + of new vulnerabilities, ranging from infoleaks to local compromise.
52914 + There has been very little oversight with an eye toward security involved
52915 + in adding new exporters of information to these filesystems, so their
52916 + use is discouraged.
52917 + This option is equivalent to a chmod 0700 of the mount paths.
52918 +
52919 +config GRKERNSEC_ROFS
52920 + bool "Runtime read-only mount protection"
52921 + help
52922 + If you say Y here, a sysctl option with name "romount_protect" will
52923 + be created. By setting this option to 1 at runtime, filesystems
52924 + will be protected in the following ways:
52925 + * No new writable mounts will be allowed
52926 + * Existing read-only mounts won't be able to be remounted read/write
52927 + * Write operations will be denied on all block devices
52928 + This option acts independently of grsec_lock: once it is set to 1,
52929 + it cannot be turned off. Therefore, please be mindful of the resulting
52930 + behavior if this option is enabled in an init script on a read-only
52931 + filesystem. This feature is mainly intended for secure embedded systems.
52932 +
52933 +config GRKERNSEC_CHROOT
52934 + bool "Chroot jail restrictions"
52935 + help
52936 + If you say Y here, you will be able to choose several options that will
52937 + make breaking out of a chrooted jail much more difficult. If you
52938 + encounter no software incompatibilities with the following options, it
52939 + is recommended that you enable each one.
52940 +
52941 +config GRKERNSEC_CHROOT_MOUNT
52942 + bool "Deny mounts"
52943 + depends on GRKERNSEC_CHROOT
52944 + help
52945 + If you say Y here, processes inside a chroot will not be able to
52946 + mount or remount filesystems. If the sysctl option is enabled, a
52947 + sysctl option with name "chroot_deny_mount" is created.
52948 +
52949 +config GRKERNSEC_CHROOT_DOUBLE
52950 + bool "Deny double-chroots"
52951 + depends on GRKERNSEC_CHROOT
52952 + help
52953 + If you say Y here, processes inside a chroot will not be able to chroot
52954 + again outside the chroot. This is a widely used method of breaking
52955 + out of a chroot jail and should not be allowed. If the sysctl
52956 + option is enabled, a sysctl option with name
52957 + "chroot_deny_chroot" is created.
52958 +
52959 +config GRKERNSEC_CHROOT_PIVOT
52960 + bool "Deny pivot_root in chroot"
52961 + depends on GRKERNSEC_CHROOT
52962 + help
52963 + If you say Y here, processes inside a chroot will not be able to use
52964 + a function called pivot_root() that was introduced in Linux 2.3.41. It
52965 + works similarly to chroot in that it changes the root filesystem. This
52966 + function could be misused in a chrooted process to attempt to break out
52967 + of the chroot, and therefore should not be allowed. If the sysctl
52968 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
52969 + created.
52970 +
52971 +config GRKERNSEC_CHROOT_CHDIR
52972 + bool "Enforce chdir(\"/\") on all chroots"
52973 + depends on GRKERNSEC_CHROOT
52974 + help
52975 + If you say Y here, the current working directory of all newly-chrooted
52976 + applications will be set to the root directory of the chroot.
52977 + The man page on chroot(2) states:
52978 + Note that this call does not change the current working
52979 + directory, so that `.' can be outside the tree rooted at
52980 + `/'. In particular, the super-user can escape from a
52981 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
52982 +
52983 + It is recommended that you say Y here, since it's not known to break
52984 + any software. If the sysctl option is enabled, a sysctl option with
52985 + name "chroot_enforce_chdir" is created.
52986 +
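
The help text above quotes the chroot(2) man page's classic escape: because chroot() leaves the working directory untouched, a root process can chroot into a subdirectory and then walk ".." out of the original jail. A minimal illustrative sketch of that sequence (requires root, error handling mostly omitted; this is exactly what the forced chdir("/") here, together with the double-chroot restriction, is meant to stop):

    /* Sketch of the "mkdir foo; chroot foo; cd .." breakout quoted above. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/stat.h>

    int main(void)
    {
        mkdir("foo", 0700);
        if (chroot("foo") != 0) {     /* cwd now lies outside the new root */
            perror("chroot");
            return 1;
        }
        for (int i = 0; i < 64; i++)  /* climb toward the real root */
            chdir("..");
        chroot(".");                  /* re-anchor the root at the real "/" */
        execl("/bin/sh", "sh", (char *)NULL);
        perror("execl");
        return 1;
    }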
52987 +config GRKERNSEC_CHROOT_CHMOD
52988 + bool "Deny (f)chmod +s"
52989 + depends on GRKERNSEC_CHROOT
52990 + help
52991 + If you say Y here, processes inside a chroot will not be able to chmod
52992 + or fchmod files to make them have suid or sgid bits. This protects
52993 + against another published method of breaking a chroot. If the sysctl
52994 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
52995 + created.
52996 +
52997 +config GRKERNSEC_CHROOT_FCHDIR
52998 + bool "Deny fchdir out of chroot"
52999 + depends on GRKERNSEC_CHROOT
53000 + help
53001 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
53002 + to a file descriptor of the chrooting process that points to a directory
53003 + outside the filesystem will be stopped. If the sysctl option
53004 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
53005 +
53006 +config GRKERNSEC_CHROOT_MKNOD
53007 + bool "Deny mknod"
53008 + depends on GRKERNSEC_CHROOT
53009 + help
53010 + If you say Y here, processes inside a chroot will not be allowed to
53011 + mknod. The problem with using mknod inside a chroot is that it
53012 + would allow an attacker to create a device entry that is the same
53013 + as one on the physical root of your system, which could be anything
53014 + from the console device to a device for your hard drive (which
53015 + they could then use to wipe the drive or steal data). It is recommended
53016 + that you say Y here, unless you run into software incompatibilities.
53017 + If the sysctl option is enabled, a sysctl option with name
53018 + "chroot_deny_mknod" is created.
53019 +
53020 +config GRKERNSEC_CHROOT_SHMAT
53021 + bool "Deny shmat() out of chroot"
53022 + depends on GRKERNSEC_CHROOT
53023 + help
53024 + If you say Y here, processes inside a chroot will not be able to attach
53025 + to shared memory segments that were created outside of the chroot jail.
53026 + It is recommended that you say Y here. If the sysctl option is enabled,
53027 + a sysctl option with name "chroot_deny_shmat" is created.
53028 +
53029 +config GRKERNSEC_CHROOT_UNIX
53030 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
53031 + depends on GRKERNSEC_CHROOT
53032 + help
53033 + If you say Y here, processes inside a chroot will not be able to
53034 + connect to abstract (meaning not belonging to a filesystem) Unix
53035 + domain sockets that were bound outside of a chroot. It is recommended
53036 + that you say Y here. If the sysctl option is enabled, a sysctl option
53037 + with name "chroot_deny_unix" is created.
53038 +
53039 +config GRKERNSEC_CHROOT_FINDTASK
53040 + bool "Protect outside processes"
53041 + depends on GRKERNSEC_CHROOT
53042 + help
53043 + If you say Y here, processes inside a chroot will not be able to
53044 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
53045 + getsid, or view any process outside of the chroot. If the sysctl
53046 + option is enabled, a sysctl option with name "chroot_findtask" is
53047 + created.
53048 +
53049 +config GRKERNSEC_CHROOT_NICE
53050 + bool "Restrict priority changes"
53051 + depends on GRKERNSEC_CHROOT
53052 + help
53053 + If you say Y here, processes inside a chroot will not be able to raise
53054 + the priority of processes in the chroot, or alter the priority of
53055 + processes outside the chroot. This provides more security than simply
53056 + removing CAP_SYS_NICE from the process' capability set. If the
53057 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
53058 + is created.
53059 +
53060 +config GRKERNSEC_CHROOT_SYSCTL
53061 + bool "Deny sysctl writes"
53062 + depends on GRKERNSEC_CHROOT
53063 + help
53064 + If you say Y here, an attacker in a chroot will not be able to
53065 + write to sysctl entries, either by sysctl(2) or through a /proc
53066 + interface. It is strongly recommended that you say Y here. If the
53067 + sysctl option is enabled, a sysctl option with name
53068 + "chroot_deny_sysctl" is created.
53069 +
53070 +config GRKERNSEC_CHROOT_CAPS
53071 + bool "Capability restrictions"
53072 + depends on GRKERNSEC_CHROOT
53073 + help
53074 + If you say Y here, the capabilities on all root processes within a
53075 + chroot jail will be lowered to stop module insertion, raw i/o,
53076 + system and net admin tasks, rebooting the system, modifying immutable
53077 + files, modifying IPC owned by another, and changing the system time.
53078 + This is left an option because it can break some apps. Disable this
53079 + if your chrooted apps are having problems performing those kinds of
53080 + tasks. If the sysctl option is enabled, a sysctl option with
53081 + name "chroot_caps" is created.
53082 +
53083 +endmenu
53084 +menu "Kernel Auditing"
53085 +depends on GRKERNSEC
53086 +
53087 +config GRKERNSEC_AUDIT_GROUP
53088 + bool "Single group for auditing"
53089 + help
53090 + If you say Y here, the exec, chdir, and (un)mount logging features
53091 + will only operate on a group you specify. This option is recommended
53092 + if you only want to watch certain users instead of having a large
53093 + amount of logs from the entire system. If the sysctl option is enabled,
53094 + a sysctl option with name "audit_group" is created.
53095 +
53096 +config GRKERNSEC_AUDIT_GID
53097 + int "GID for auditing"
53098 + depends on GRKERNSEC_AUDIT_GROUP
53099 + default 1007
53100 +
53101 +config GRKERNSEC_EXECLOG
53102 + bool "Exec logging"
53103 + help
53104 + If you say Y here, all execve() calls will be logged (since the
53105 + other exec*() calls are frontends to execve(), all execution
53106 + will be logged). Useful for shell-servers that like to keep track
53107 + of their users. If the sysctl option is enabled, a sysctl option with
53108 + name "exec_logging" is created.
53109 + WARNING: This option when enabled will produce a LOT of logs, especially
53110 + on an active system.
53111 +
53112 +config GRKERNSEC_RESLOG
53113 + bool "Resource logging"
53114 + help
53115 + If you say Y here, all attempts to overstep resource limits will
53116 + be logged with the resource name, the requested size, and the current
53117 + limit. It is highly recommended that you say Y here. If the sysctl
53118 + option is enabled, a sysctl option with name "resource_logging" is
53119 + created. If the RBAC system is enabled, the sysctl value is ignored.
53120 +
53121 +config GRKERNSEC_CHROOT_EXECLOG
53122 + bool "Log execs within chroot"
53123 + help
53124 + If you say Y here, all executions inside a chroot jail will be logged
53125 + to syslog. This can cause a large number of logs if certain
53126 + applications (e.g. djb's daemontools) are installed on the system, and
53127 + is therefore left as an option. If the sysctl option is enabled, a
53128 + sysctl option with name "chroot_execlog" is created.
53129 +
53130 +config GRKERNSEC_AUDIT_PTRACE
53131 + bool "Ptrace logging"
53132 + help
53133 + If you say Y here, all attempts to attach to a process via ptrace
53134 + will be logged. If the sysctl option is enabled, a sysctl option
53135 + with name "audit_ptrace" is created.
53136 +
53137 +config GRKERNSEC_AUDIT_CHDIR
53138 + bool "Chdir logging"
53139 + help
53140 + If you say Y here, all chdir() calls will be logged. If the sysctl
53141 + option is enabled, a sysctl option with name "audit_chdir" is created.
53142 +
53143 +config GRKERNSEC_AUDIT_MOUNT
53144 + bool "(Un)Mount logging"
53145 + help
53146 + If you say Y here, all mounts and unmounts will be logged. If the
53147 + sysctl option is enabled, a sysctl option with name "audit_mount" is
53148 + created.
53149 +
53150 +config GRKERNSEC_SIGNAL
53151 + bool "Signal logging"
53152 + help
53153 + If you say Y here, certain important signals will be logged, such as
53154 + SIGSEGV, which will inform you when an error occurs in a program,
53155 + which in some cases could indicate a possible exploit attempt.
53156 + If the sysctl option is enabled, a sysctl option with name
53157 + "signal_logging" is created.
53158 +
53159 +config GRKERNSEC_FORKFAIL
53160 + bool "Fork failure logging"
53161 + help
53162 + If you say Y here, all failed fork() attempts will be logged.
53163 + This could suggest a fork bomb, or someone attempting to overstep
53164 + their process limit. If the sysctl option is enabled, a sysctl option
53165 + with name "forkfail_logging" is created.
53166 +
53167 +config GRKERNSEC_TIME
53168 + bool "Time change logging"
53169 + help
53170 + If you say Y here, any changes of the system clock will be logged.
53171 + If the sysctl option is enabled, a sysctl option with name
53172 + "timechange_logging" is created.
53173 +
53174 +config GRKERNSEC_PROC_IPADDR
53175 + bool "/proc/<pid>/ipaddr support"
53176 + help
53177 + If you say Y here, a new entry will be added to each /proc/<pid>
53178 + directory that contains the IP address of the person using the task.
53179 + The IP is carried across local TCP and AF_UNIX stream sockets.
53180 + This information can be useful for IDS/IPSes to perform remote response
53181 + to a local attack. The entry is readable by only the owner of the
53182 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
53183 + the RBAC system), and thus does not create privacy concerns.
53184 +
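
A minimal sketch of consuming the entry described above, assuming the feature is enabled: it reads the ipaddr entry of the parent process (typically the login shell), which only the owner of that process can do. File name and PID choice are illustrative.

/* ipaddr_read.c - illustrative reader for the /proc/<pid>/ipaddr entry
 * described above; here we ask for the parent process (e.g. the shell). */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char path[64], addr[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/ipaddr", getppid());
	f = fopen(path, "r");
	if (!f) {
		perror(path);	/* feature off, or not the owner of the task */
		return 1;
	}
	if (fgets(addr, sizeof(addr), f))
		printf("pid %d connected from %s\n", getppid(), addr);
	fclose(f);
	return 0;
}
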
53185 +config GRKERNSEC_RWXMAP_LOG
53186 + bool 'Denied RWX mmap/mprotect logging'
53187 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
53188 + help
53189 + If you say Y here, calls to mmap() and mprotect() with explicit
53190 + usage of PROT_WRITE and PROT_EXEC together will be logged when
53191 + denied by the PAX_MPROTECT feature. If the sysctl option is
53192 + enabled, a sysctl option with name "rwxmap_logging" is created.
53193 +
53194 +config GRKERNSEC_AUDIT_TEXTREL
53195 + bool 'ELF text relocations logging (READ HELP)'
53196 + depends on PAX_MPROTECT
53197 + help
53198 + If you say Y here, text relocations will be logged with the filename
53199 + of the offending library or binary. The purpose of the feature is
53200 + to help Linux distribution developers get rid of libraries and
53201 + binaries that need text relocations which hinder the future progress
53202 + of PaX. Only Linux distribution developers should say Y here, and
53203 + never on a production machine, as this option creates an information
53204 + leak that could aid an attacker in defeating the randomization of
53205 + a single memory region. If the sysctl option is enabled, a sysctl
53206 + option with name "audit_textrel" is created.
53207 +
53208 +endmenu
53209 +
53210 +menu "Executable Protections"
53211 +depends on GRKERNSEC
53212 +
53213 +config GRKERNSEC_EXECVE
53214 + bool "Enforce RLIMIT_NPROC on execs"
53215 + help
53216 + If you say Y here, users with a resource limit on processes will
53217 + have the value checked during execve() calls. The current system
53218 + only checks the system limit during fork() calls. If the sysctl option
53219 + is enabled, a sysctl option with name "execve_limiting" is created.
53220 +
53221 +config GRKERNSEC_DMESG
53222 + bool "Dmesg(8) restriction"
53223 + help
53224 + If you say Y here, non-root users will not be able to use dmesg(8)
53225 + to view up to the last 4kb of messages in the kernel's log buffer.
53226 + The kernel's log buffer often contains kernel addresses and other
53227 + identifying information useful to an attacker in fingerprinting a
53228 + system for a targeted exploit.
53229 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
53230 + created.
53231 +
53232 +config GRKERNSEC_HARDEN_PTRACE
53233 + bool "Deter ptrace-based process snooping"
53234 + help
53235 + If you say Y here, TTY sniffers and other malicious monitoring
53236 + programs implemented through ptrace will be defeated. If you
53237 + have been using the RBAC system, this option has already been
53238 + enabled for several years for all users, with the ability to make
53239 + fine-grained exceptions.
53240 +
53241 + This option only affects the ability of non-root users to ptrace
53242 + processes that are not a descendant of the ptracing process.
53243 + This means that strace ./binary and gdb ./binary will still work,
53244 + but attaching to arbitrary processes will not. If the sysctl
53245 + option is enabled, a sysctl option with name "harden_ptrace" is
53246 + created.
53247 +
53248 +config GRKERNSEC_TPE
53249 + bool "Trusted Path Execution (TPE)"
53250 + help
53251 + If you say Y here, you will be able to choose a gid to add to the
53252 + supplementary groups of users you want to mark as "untrusted."
53253 + These users will not be able to execute any files that are not in
53254 + root-owned directories writable only by root. If the sysctl option
53255 + is enabled, a sysctl option with name "tpe" is created.
53256 +
53257 +config GRKERNSEC_TPE_ALL
53258 + bool "Partially restrict all non-root users"
53259 + depends on GRKERNSEC_TPE
53260 + help
53261 + If you say Y here, all non-root users will be covered under
53262 + a weaker TPE restriction. This is separate from, and in addition to,
53263 + the main TPE options that you have selected elsewhere. Thus, if a
53264 + "trusted" GID is chosen, this restriction applies to even that GID.
53265 + Under this restriction, all non-root users will only be allowed to
53266 + execute files in directories they own that are not group or
53267 + world-writable, or in directories owned by root and writable only by
53268 + root. If the sysctl option is enabled, a sysctl option with name
53269 + "tpe_restrict_all" is created.
53270 +
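
The directory trust rule described above (root-owned and writable only by root, or, under this partial restriction, owned by the user and not group/world-writable) can be approximated in userland as follows. This is a hedged sketch only; the real check is performed in the kernel (grsec_tpe.c) and differs in detail.

/* tpe_check.c - illustrative userland approximation of the TPE directory
 * trust rule described above; not the in-kernel implementation. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>

static int dir_is_trusted(const char *dir, uid_t uid)
{
	struct stat st;

	if (stat(dir, &st) || !S_ISDIR(st.st_mode))
		return 0;
	/* group- or world-writable directories are never trusted */
	if (st.st_mode & (S_IWGRP | S_IWOTH))
		return 0;
	/* trusted if owned by root; the partial restriction above also
	 * accepts directories owned by the user running the binary */
	return st.st_uid == 0 || st.st_uid == uid;
}

int main(int argc, char **argv)
{
	const char *dir = argc > 1 ? argv[1] : ".";

	printf("%s: %s\n", dir,
	       dir_is_trusted(dir, getuid()) ? "trusted" : "untrusted");
	return 0;
}
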
53271 +config GRKERNSEC_TPE_INVERT
53272 + bool "Invert GID option"
53273 + depends on GRKERNSEC_TPE
53274 + help
53275 + If you say Y here, the group you specify in the TPE configuration will
53276 + decide what group TPE restrictions will be *disabled* for. This
53277 + option is useful if you want TPE restrictions to be applied to most
53278 + users on the system. If the sysctl option is enabled, a sysctl option
53279 + with name "tpe_invert" is created. Unlike other sysctl options, this
53280 + entry will default to on for backward compatibility.
53281 +
53282 +config GRKERNSEC_TPE_GID
53283 + int "GID for untrusted users"
53284 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
53285 + default 1005
53286 + help
53287 + Setting this GID determines what group TPE restrictions will be
53288 + *enabled* for. If the sysctl option is enabled, a sysctl option
53289 + with name "tpe_gid" is created.
53290 +
53291 +config GRKERNSEC_TPE_GID
53292 + int "GID for trusted users"
53293 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
53294 + default 1005
53295 + help
53296 + Setting this GID determines what group TPE restrictions will be
53297 + *disabled* for. If the sysctl option is enabled, a sysctl option
53298 + with name "tpe_gid" is created.
53299 +
53300 +endmenu
53301 +menu "Network Protections"
53302 +depends on GRKERNSEC
53303 +
53304 +config GRKERNSEC_RANDNET
53305 + bool "Larger entropy pools"
53306 + help
53307 + If you say Y here, the entropy pools used for many features of Linux
53308 + and grsecurity will be doubled in size. Since several grsecurity
53309 + features use additional randomness, it is recommended that you say Y
53310 + here. Saying Y here has a similar effect as modifying
53311 + /proc/sys/kernel/random/poolsize.
53312 +
53313 +config GRKERNSEC_BLACKHOLE
53314 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
53315 + help
53316 + If you say Y here, neither TCP resets nor ICMP
53317 + destination-unreachable packets will be sent in response to packets
53318 + sent to ports for which no associated listening process exists.
53319 + This feature supports both IPv4 and IPv6 and exempts the
53320 + loopback interface from blackholing. Enabling this feature
53321 + makes a host more resilient to DoS attacks and reduces network
53322 + visibility against scanners.
53323 +
53324 + The blackhole feature as-implemented is equivalent to the FreeBSD
53325 + blackhole feature, as it prevents RST responses to all packets, not
53326 + just SYNs. Under most application behavior this causes no
53327 + problems, but applications (like haproxy) may not close certain
53328 + connections in a way that cleanly terminates them on the remote
53329 + end, leaving the remote host in LAST_ACK state. Because of this
53330 + side-effect and to prevent intentional LAST_ACK DoSes, this
53331 + feature also adds automatic mitigation against such attacks.
53332 + The mitigation drastically reduces the amount of time a socket
53333 + can spend in LAST_ACK state. If you're using haproxy and not
53334 + all servers it connects to have this option enabled, consider
53335 + disabling this feature on the haproxy host.
53336 +
53337 + If the sysctl option is enabled, two sysctl options with names
53338 + "ip_blackhole" and "lastack_retries" will be created.
53339 + While "ip_blackhole" takes the standard zero/non-zero on/off
53340 + toggle, "lastack_retries" uses the same kinds of values as
53341 + "tcp_retries1" and "tcp_retries2". The default value of 4
53342 + prevents a socket from lasting more than 45 seconds in LAST_ACK
53343 + state.
53344 +
53345 +config GRKERNSEC_SOCKET
53346 + bool "Socket restrictions"
53347 + help
53348 + If you say Y here, you will be able to choose from several options.
53349 + If you assign a GID on your system and add it to the supplementary
53350 + groups of users you want to restrict socket access to, this patch
53351 + will perform up to three things, based on the option(s) you choose.
53352 +
53353 +config GRKERNSEC_SOCKET_ALL
53354 + bool "Deny any sockets to group"
53355 + depends on GRKERNSEC_SOCKET
53356 + help
53357 + If you say Y here, you will be able to choose a GID whose users will
53358 + be unable to connect to other hosts from your machine or run server
53359 + applications from your machine. If the sysctl option is enabled, a
53360 + sysctl option with name "socket_all" is created.
53361 +
53362 +config GRKERNSEC_SOCKET_ALL_GID
53363 + int "GID to deny all sockets for"
53364 + depends on GRKERNSEC_SOCKET_ALL
53365 + default 1004
53366 + help
53367 + Here you can choose the GID to disable socket access for. Remember to
53368 + add the users you want socket access disabled for to the GID
53369 + specified here. If the sysctl option is enabled, a sysctl option
53370 + with name "socket_all_gid" is created.
53371 +
53372 +config GRKERNSEC_SOCKET_CLIENT
53373 + bool "Deny client sockets to group"
53374 + depends on GRKERNSEC_SOCKET
53375 + help
53376 + If you say Y here, you will be able to choose a GID whose users will
53377 + be unable to connect to other hosts from your machine, but will be
53378 + able to run servers. If this option is enabled, all users in the group
53379 + you specify will have to use passive mode when initiating ftp transfers
53380 + from the shell on your machine. If the sysctl option is enabled, a
53381 + sysctl option with name "socket_client" is created.
53382 +
53383 +config GRKERNSEC_SOCKET_CLIENT_GID
53384 + int "GID to deny client sockets for"
53385 + depends on GRKERNSEC_SOCKET_CLIENT
53386 + default 1003
53387 + help
53388 + Here you can choose the GID to disable client socket access for.
53389 + Remember to add the users you want client socket access disabled for to
53390 + the GID specified here. If the sysctl option is enabled, a sysctl
53391 + option with name "socket_client_gid" is created.
53392 +
53393 +config GRKERNSEC_SOCKET_SERVER
53394 + bool "Deny server sockets to group"
53395 + depends on GRKERNSEC_SOCKET
53396 + help
53397 + If you say Y here, you will be able to choose a GID whose users will
53398 + be unable to run server applications from your machine. If the sysctl
53399 + option is enabled, a sysctl option with name "socket_server" is created.
53400 +
53401 +config GRKERNSEC_SOCKET_SERVER_GID
53402 + int "GID to deny server sockets for"
53403 + depends on GRKERNSEC_SOCKET_SERVER
53404 + default 1002
53405 + help
53406 + Here you can choose the GID to disable server socket access for.
53407 + Remember to add the users you want server socket access disabled for to
53408 + the GID specified here. If the sysctl option is enabled, a sysctl
53409 + option with name "socket_server_gid" is created.
53410 +
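
A hedged userland probe for the three options above: a user placed in the configured GID simply sees the corresponding socket call fail, which the snippet below makes visible without assuming a particular errno.

/* socket_probe.c - illustrative check of whether the current user may
 * still create a TCP socket under the restrictions described above. */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");	/* denied for this GID, or otherwise unavailable */
		return 1;
	}
	puts("socket creation allowed");
	close(fd);
	return 0;
}
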
53411 +endmenu
53412 +menu "Sysctl support"
53413 +depends on GRKERNSEC && SYSCTL
53414 +
53415 +config GRKERNSEC_SYSCTL
53416 + bool "Sysctl support"
53417 + help
53418 + If you say Y here, you will be able to change the options that
53419 + grsecurity runs with at bootup, without having to recompile your
53420 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
53421 + to enable (1) or disable (0) various features. All the sysctl entries
53422 + are mutable until the "grsec_lock" entry is set to a non-zero value.
53423 + All features enabled in the kernel configuration are disabled at boot
53424 + if you do not say Y to the "Turn on features by default" option.
53425 + All options should be set at startup, and the grsec_lock entry should
53426 + be set to a non-zero value after all the options are set.
53427 + *THIS IS EXTREMELY IMPORTANT*
53428 +
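
A minimal sketch of the startup sequence the help text above calls for, assuming the sysctl option is compiled in: set the desired entries under /proc/sys/kernel/grsecurity, then write grsec_lock last so they become immutable. The entry names below are examples quoted from the help texts; only those built into the kernel will exist.

/* grsec_boot.c - illustrative boot-time helper; entry names are examples
 * from the help texts above and must match the options actually enabled. */
#include <stdio.h>

static int write_sysctl(const char *name, const char *val)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	write_sysctl("chroot_deny_sysctl", "1");
	write_sysctl("harden_ptrace", "1");
	write_sysctl("audit_mount", "1");
	/* must be last: afterwards the entries above can no longer be changed */
	return write_sysctl("grsec_lock", "1") ? 1 : 0;
}
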
53429 +config GRKERNSEC_SYSCTL_DISTRO
53430 + bool "Extra sysctl support for distro makers (READ HELP)"
53431 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
53432 + help
53433 + If you say Y here, additional sysctl options will be created
53434 + for features that affect processes running as root. Therefore,
53435 + it is critical when using this option that the grsec_lock entry be
53436 + enabled after boot. Only distros that ship prebuilt kernel packages
53437 + with this option enabled and that can ensure grsec_lock is set
53438 + after boot should use this option.
53439 + *Failure to set grsec_lock after boot makes all grsec features
53440 + this option covers useless*
53441 +
53442 + Currently this option creates the following sysctl entries:
53443 + "Disable Privileged I/O": "disable_priv_io"
53444 +
53445 +config GRKERNSEC_SYSCTL_ON
53446 + bool "Turn on features by default"
53447 + depends on GRKERNSEC_SYSCTL
53448 + help
53449 + If you say Y here, instead of having all features enabled in the
53450 + kernel configuration disabled at boot time, the features will be
53451 + enabled at boot time. It is recommended you say Y here unless
53452 + there is some reason you would want all sysctl-tunable features to
53453 + be disabled by default. As mentioned elsewhere, it is important
53454 + to enable the grsec_lock entry once you have finished modifying
53455 + the sysctl entries.
53456 +
53457 +endmenu
53458 +menu "Logging Options"
53459 +depends on GRKERNSEC
53460 +
53461 +config GRKERNSEC_FLOODTIME
53462 + int "Seconds in between log messages (minimum)"
53463 + default 10
53464 + help
53465 + This option allows you to enforce the number of seconds between
53466 + grsecurity log messages. The default should be suitable for most
53467 + people; however, if you choose to change it, choose a value small enough
53468 + to allow informative logs to be produced, but large enough to
53469 + prevent flooding.
53470 +
53471 +config GRKERNSEC_FLOODBURST
53472 + int "Number of messages in a burst (maximum)"
53473 + default 4
53474 + help
53475 + This option allows you to choose the maximum number of messages allowed
53476 + within the flood time interval you chose in a separate option. The
53477 + default should be suitable for most people; however, if you find that
53478 + many of your logs are being interpreted as flooding, you may want to
53479 + raise this value.
53480 +
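
Taken together, the two options above describe a burst-within-interval limiter: at most FLOODBURST messages per FLOODTIME seconds, with the remainder suppressed. The following is a hedged sketch of that logic using the defaults above, not the kernel's actual implementation.

/* floodlimit.c - minimal sketch of the burst/interval log limiter that
 * the two options above configure; illustrative only. */
#include <stdio.h>
#include <time.h>

#define FLOODTIME  10	/* seconds per window (default above) */
#define FLOODBURST 4	/* messages allowed per window (default above) */

static int grsec_log_allowed(void)
{
	static time_t window_start;
	static int sent_in_window;
	time_t now = time(NULL);

	if (now - window_start >= FLOODTIME) {
		window_start = now;
		sent_in_window = 0;
	}
	if (sent_in_window < FLOODBURST) {
		sent_in_window++;
		return 1;
	}
	return 0;	/* suppressed as flooding */
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		printf("msg %d: %s\n", i,
		       grsec_log_allowed() ? "logged" : "suppressed");
	return 0;
}
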
53481 +endmenu
53482 +
53483 +endmenu
53484 diff -urNp linux-2.6.32.42/grsecurity/Makefile linux-2.6.32.42/grsecurity/Makefile
53485 --- linux-2.6.32.42/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
53486 +++ linux-2.6.32.42/grsecurity/Makefile 2011-05-24 20:27:46.000000000 -0400
53487 @@ -0,0 +1,33 @@
53488 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
53489 +# during 2001-2009 it has been completely redesigned by Brad Spengler
53490 +# into an RBAC system
53491 +#
53492 +# All code in this directory and various hooks inserted throughout the kernel
53493 +# are copyright Brad Spengler - Open Source Security, Inc., and released
53494 +# under the GPL v2 or higher
53495 +
53496 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
53497 + grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
53498 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
53499 +
53500 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
53501 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
53502 + gracl_learn.o grsec_log.o
53503 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
53504 +
53505 +ifdef CONFIG_NET
53506 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
53507 +endif
53508 +
53509 +ifndef CONFIG_GRKERNSEC
53510 +obj-y += grsec_disabled.o
53511 +endif
53512 +
53513 +ifdef CONFIG_GRKERNSEC_HIDESYM
53514 +extra-y := grsec_hidesym.o
53515 +$(obj)/grsec_hidesym.o:
53516 + @-chmod -f 500 /boot
53517 + @-chmod -f 500 /lib/modules
53518 + @-chmod -f 700 .
53519 + @echo ' grsec: protected kernel image paths'
53520 +endif
53521 diff -urNp linux-2.6.32.42/include/acpi/acpi_drivers.h linux-2.6.32.42/include/acpi/acpi_drivers.h
53522 --- linux-2.6.32.42/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
53523 +++ linux-2.6.32.42/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
53524 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
53525 Dock Station
53526 -------------------------------------------------------------------------- */
53527 struct acpi_dock_ops {
53528 - acpi_notify_handler handler;
53529 - acpi_notify_handler uevent;
53530 + const acpi_notify_handler handler;
53531 + const acpi_notify_handler uevent;
53532 };
53533
53534 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
53535 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
53536 extern int register_dock_notifier(struct notifier_block *nb);
53537 extern void unregister_dock_notifier(struct notifier_block *nb);
53538 extern int register_hotplug_dock_device(acpi_handle handle,
53539 - struct acpi_dock_ops *ops,
53540 + const struct acpi_dock_ops *ops,
53541 void *context);
53542 extern void unregister_hotplug_dock_device(acpi_handle handle);
53543 #else
53544 @@ -144,7 +144,7 @@ static inline void unregister_dock_notif
53545 {
53546 }
53547 static inline int register_hotplug_dock_device(acpi_handle handle,
53548 - struct acpi_dock_ops *ops,
53549 + const struct acpi_dock_ops *ops,
53550 void *context)
53551 {
53552 return -ENODEV;
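
The hunk above is one instance of a constification pattern repeated throughout this patch: ops tables that are only ever read are declared const so callers cannot be handed a writable function-pointer table. A hedged sketch of what a driver-side definition then looks like; the handler names are made up for the example and do not come from the patch.

/* Illustrative only: with the const-qualified acpi_dock_ops above, a
 * driver defines its ops table as const data, so it can live in a
 * read-only section and register_hotplug_dock_device() accepts it
 * without a cast. */
#include <acpi/acpi_drivers.h>

static void example_dock_handler(acpi_handle handle, u32 event, void *data)
{
	/* react to dock/undock notification */
}

static const struct acpi_dock_ops example_dock_ops = {
	.handler = example_dock_handler,
	.uevent  = example_dock_handler,
};

/* ... later, in probe code:
 *	register_hotplug_dock_device(handle, &example_dock_ops, context);
 */
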
53553 diff -urNp linux-2.6.32.42/include/asm-generic/atomic-long.h linux-2.6.32.42/include/asm-generic/atomic-long.h
53554 --- linux-2.6.32.42/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
53555 +++ linux-2.6.32.42/include/asm-generic/atomic-long.h 2011-05-16 21:46:57.000000000 -0400
53556 @@ -22,6 +22,12 @@
53557
53558 typedef atomic64_t atomic_long_t;
53559
53560 +#ifdef CONFIG_PAX_REFCOUNT
53561 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
53562 +#else
53563 +typedef atomic64_t atomic_long_unchecked_t;
53564 +#endif
53565 +
53566 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
53567
53568 static inline long atomic_long_read(atomic_long_t *l)
53569 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
53570 return (long)atomic64_read(v);
53571 }
53572
53573 +#ifdef CONFIG_PAX_REFCOUNT
53574 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53575 +{
53576 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53577 +
53578 + return (long)atomic64_read_unchecked(v);
53579 +}
53580 +#endif
53581 +
53582 static inline void atomic_long_set(atomic_long_t *l, long i)
53583 {
53584 atomic64_t *v = (atomic64_t *)l;
53585 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
53586 atomic64_set(v, i);
53587 }
53588
53589 +#ifdef CONFIG_PAX_REFCOUNT
53590 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53591 +{
53592 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53593 +
53594 + atomic64_set_unchecked(v, i);
53595 +}
53596 +#endif
53597 +
53598 static inline void atomic_long_inc(atomic_long_t *l)
53599 {
53600 atomic64_t *v = (atomic64_t *)l;
53601 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
53602 atomic64_inc(v);
53603 }
53604
53605 +#ifdef CONFIG_PAX_REFCOUNT
53606 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53607 +{
53608 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53609 +
53610 + atomic64_inc_unchecked(v);
53611 +}
53612 +#endif
53613 +
53614 static inline void atomic_long_dec(atomic_long_t *l)
53615 {
53616 atomic64_t *v = (atomic64_t *)l;
53617 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
53618 atomic64_dec(v);
53619 }
53620
53621 +#ifdef CONFIG_PAX_REFCOUNT
53622 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53623 +{
53624 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53625 +
53626 + atomic64_dec_unchecked(v);
53627 +}
53628 +#endif
53629 +
53630 static inline void atomic_long_add(long i, atomic_long_t *l)
53631 {
53632 atomic64_t *v = (atomic64_t *)l;
53633 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
53634 atomic64_add(i, v);
53635 }
53636
53637 +#ifdef CONFIG_PAX_REFCOUNT
53638 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53639 +{
53640 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53641 +
53642 + atomic64_add_unchecked(i, v);
53643 +}
53644 +#endif
53645 +
53646 static inline void atomic_long_sub(long i, atomic_long_t *l)
53647 {
53648 atomic64_t *v = (atomic64_t *)l;
53649 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
53650 return (long)atomic64_inc_return(v);
53651 }
53652
53653 +#ifdef CONFIG_PAX_REFCOUNT
53654 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53655 +{
53656 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53657 +
53658 + return (long)atomic64_inc_return_unchecked(v);
53659 +}
53660 +#endif
53661 +
53662 static inline long atomic_long_dec_return(atomic_long_t *l)
53663 {
53664 atomic64_t *v = (atomic64_t *)l;
53665 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
53666
53667 typedef atomic_t atomic_long_t;
53668
53669 +#ifdef CONFIG_PAX_REFCOUNT
53670 +typedef atomic_unchecked_t atomic_long_unchecked_t;
53671 +#else
53672 +typedef atomic_t atomic_long_unchecked_t;
53673 +#endif
53674 +
53675 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
53676 static inline long atomic_long_read(atomic_long_t *l)
53677 {
53678 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
53679 return (long)atomic_read(v);
53680 }
53681
53682 +#ifdef CONFIG_PAX_REFCOUNT
53683 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53684 +{
53685 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53686 +
53687 + return (long)atomic_read_unchecked(v);
53688 +}
53689 +#endif
53690 +
53691 static inline void atomic_long_set(atomic_long_t *l, long i)
53692 {
53693 atomic_t *v = (atomic_t *)l;
53694 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
53695 atomic_set(v, i);
53696 }
53697
53698 +#ifdef CONFIG_PAX_REFCOUNT
53699 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53700 +{
53701 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53702 +
53703 + atomic_set_unchecked(v, i);
53704 +}
53705 +#endif
53706 +
53707 static inline void atomic_long_inc(atomic_long_t *l)
53708 {
53709 atomic_t *v = (atomic_t *)l;
53710 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
53711 atomic_inc(v);
53712 }
53713
53714 +#ifdef CONFIG_PAX_REFCOUNT
53715 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53716 +{
53717 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53718 +
53719 + atomic_inc_unchecked(v);
53720 +}
53721 +#endif
53722 +
53723 static inline void atomic_long_dec(atomic_long_t *l)
53724 {
53725 atomic_t *v = (atomic_t *)l;
53726 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
53727 atomic_dec(v);
53728 }
53729
53730 +#ifdef CONFIG_PAX_REFCOUNT
53731 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53732 +{
53733 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53734 +
53735 + atomic_dec_unchecked(v);
53736 +}
53737 +#endif
53738 +
53739 static inline void atomic_long_add(long i, atomic_long_t *l)
53740 {
53741 atomic_t *v = (atomic_t *)l;
53742 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long
53743 atomic_add(i, v);
53744 }
53745
53746 +#ifdef CONFIG_PAX_REFCOUNT
53747 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53748 +{
53749 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53750 +
53751 + atomic_add_unchecked(i, v);
53752 +}
53753 +#endif
53754 +
53755 static inline void atomic_long_sub(long i, atomic_long_t *l)
53756 {
53757 atomic_t *v = (atomic_t *)l;
53758 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
53759 return (long)atomic_inc_return(v);
53760 }
53761
53762 +#ifdef CONFIG_PAX_REFCOUNT
53763 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53764 +{
53765 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53766 +
53767 + return (long)atomic_inc_return_unchecked(v);
53768 +}
53769 +#endif
53770 +
53771 static inline long atomic_long_dec_return(atomic_long_t *l)
53772 {
53773 atomic_t *v = (atomic_t *)l;
53774 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
53775
53776 #endif /* BITS_PER_LONG == 64 */
53777
53778 +#ifdef CONFIG_PAX_REFCOUNT
53779 +static inline void pax_refcount_needs_these_functions(void)
53780 +{
53781 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
53782 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
53783 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
53784 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
53785 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
53786 + atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
53787 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
53788 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
53789 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
53790 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
53791 + atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
53792 +
53793 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
53794 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
53795 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
53796 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
53797 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
53798 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
53799 +}
53800 +#else
53801 +#define atomic_read_unchecked(v) atomic_read(v)
53802 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
53803 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
53804 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
53805 +#define atomic_inc_unchecked(v) atomic_inc(v)
53806 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
53807 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
53808 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
53809 +#define atomic_dec_unchecked(v) atomic_dec(v)
53810 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
53811 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
53812 +
53813 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
53814 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
53815 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
53816 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
53817 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
53818 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
53819 +#endif
53820 +
53821 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
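
The _unchecked types and helpers added above let counters whose wraparound is harmless (statistics and the like) opt out of PAX_REFCOUNT's overflow trap, while real reference counts keep the checked atomic_t/atomic_long_t. A hedged usage sketch with a hypothetical counter, not taken from the patch:

/* Illustrative only: a statistics counter that may legitimately wrap is
 * declared _unchecked so PAX_REFCOUNT does not treat the wrap as an
 * exploited reference-count overflow. */
#include <asm/atomic.h>

static atomic_long_unchecked_t example_rx_packets = ATOMIC_LONG_INIT(0);

static void example_count_packet(void)
{
	atomic_long_inc_unchecked(&example_rx_packets);
}

static long example_read_packets(void)
{
	return atomic_long_read_unchecked(&example_rx_packets);
}
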
53822 diff -urNp linux-2.6.32.42/include/asm-generic/cache.h linux-2.6.32.42/include/asm-generic/cache.h
53823 --- linux-2.6.32.42/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
53824 +++ linux-2.6.32.42/include/asm-generic/cache.h 2011-05-04 17:56:28.000000000 -0400
53825 @@ -6,7 +6,7 @@
53826 * cache lines need to provide their own cache.h.
53827 */
53828
53829 -#define L1_CACHE_SHIFT 5
53830 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
53831 +#define L1_CACHE_SHIFT 5U
53832 +#define L1_CACHE_BYTES (1U << L1_CACHE_SHIFT)
53833
53834 #endif /* __ASM_GENERIC_CACHE_H */
53835 diff -urNp linux-2.6.32.42/include/asm-generic/dma-mapping-common.h linux-2.6.32.42/include/asm-generic/dma-mapping-common.h
53836 --- linux-2.6.32.42/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
53837 +++ linux-2.6.32.42/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
53838 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
53839 enum dma_data_direction dir,
53840 struct dma_attrs *attrs)
53841 {
53842 - struct dma_map_ops *ops = get_dma_ops(dev);
53843 + const struct dma_map_ops *ops = get_dma_ops(dev);
53844 dma_addr_t addr;
53845
53846 kmemcheck_mark_initialized(ptr, size);
53847 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
53848 enum dma_data_direction dir,
53849 struct dma_attrs *attrs)
53850 {
53851 - struct dma_map_ops *ops = get_dma_ops(dev);
53852 + const struct dma_map_ops *ops = get_dma_ops(dev);
53853
53854 BUG_ON(!valid_dma_direction(dir));
53855 if (ops->unmap_page)
53856 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
53857 int nents, enum dma_data_direction dir,
53858 struct dma_attrs *attrs)
53859 {
53860 - struct dma_map_ops *ops = get_dma_ops(dev);
53861 + const struct dma_map_ops *ops = get_dma_ops(dev);
53862 int i, ents;
53863 struct scatterlist *s;
53864
53865 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
53866 int nents, enum dma_data_direction dir,
53867 struct dma_attrs *attrs)
53868 {
53869 - struct dma_map_ops *ops = get_dma_ops(dev);
53870 + const struct dma_map_ops *ops = get_dma_ops(dev);
53871
53872 BUG_ON(!valid_dma_direction(dir));
53873 debug_dma_unmap_sg(dev, sg, nents, dir);
53874 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
53875 size_t offset, size_t size,
53876 enum dma_data_direction dir)
53877 {
53878 - struct dma_map_ops *ops = get_dma_ops(dev);
53879 + const struct dma_map_ops *ops = get_dma_ops(dev);
53880 dma_addr_t addr;
53881
53882 kmemcheck_mark_initialized(page_address(page) + offset, size);
53883 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
53884 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
53885 size_t size, enum dma_data_direction dir)
53886 {
53887 - struct dma_map_ops *ops = get_dma_ops(dev);
53888 + const struct dma_map_ops *ops = get_dma_ops(dev);
53889
53890 BUG_ON(!valid_dma_direction(dir));
53891 if (ops->unmap_page)
53892 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
53893 size_t size,
53894 enum dma_data_direction dir)
53895 {
53896 - struct dma_map_ops *ops = get_dma_ops(dev);
53897 + const struct dma_map_ops *ops = get_dma_ops(dev);
53898
53899 BUG_ON(!valid_dma_direction(dir));
53900 if (ops->sync_single_for_cpu)
53901 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
53902 dma_addr_t addr, size_t size,
53903 enum dma_data_direction dir)
53904 {
53905 - struct dma_map_ops *ops = get_dma_ops(dev);
53906 + const struct dma_map_ops *ops = get_dma_ops(dev);
53907
53908 BUG_ON(!valid_dma_direction(dir));
53909 if (ops->sync_single_for_device)
53910 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range
53911 size_t size,
53912 enum dma_data_direction dir)
53913 {
53914 - struct dma_map_ops *ops = get_dma_ops(dev);
53915 + const struct dma_map_ops *ops = get_dma_ops(dev);
53916
53917 BUG_ON(!valid_dma_direction(dir));
53918 if (ops->sync_single_range_for_cpu) {
53919 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range
53920 size_t size,
53921 enum dma_data_direction dir)
53922 {
53923 - struct dma_map_ops *ops = get_dma_ops(dev);
53924 + const struct dma_map_ops *ops = get_dma_ops(dev);
53925
53926 BUG_ON(!valid_dma_direction(dir));
53927 if (ops->sync_single_range_for_device) {
53928 @@ -155,7 +155,7 @@ static inline void
53929 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
53930 int nelems, enum dma_data_direction dir)
53931 {
53932 - struct dma_map_ops *ops = get_dma_ops(dev);
53933 + const struct dma_map_ops *ops = get_dma_ops(dev);
53934
53935 BUG_ON(!valid_dma_direction(dir));
53936 if (ops->sync_sg_for_cpu)
53937 @@ -167,7 +167,7 @@ static inline void
53938 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
53939 int nelems, enum dma_data_direction dir)
53940 {
53941 - struct dma_map_ops *ops = get_dma_ops(dev);
53942 + const struct dma_map_ops *ops = get_dma_ops(dev);
53943
53944 BUG_ON(!valid_dma_direction(dir));
53945 if (ops->sync_sg_for_device)
53946 diff -urNp linux-2.6.32.42/include/asm-generic/futex.h linux-2.6.32.42/include/asm-generic/futex.h
53947 --- linux-2.6.32.42/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
53948 +++ linux-2.6.32.42/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
53949 @@ -6,7 +6,7 @@
53950 #include <asm/errno.h>
53951
53952 static inline int
53953 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
53954 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
53955 {
53956 int op = (encoded_op >> 28) & 7;
53957 int cmp = (encoded_op >> 24) & 15;
53958 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
53959 }
53960
53961 static inline int
53962 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
53963 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
53964 {
53965 return -ENOSYS;
53966 }
53967 diff -urNp linux-2.6.32.42/include/asm-generic/int-l64.h linux-2.6.32.42/include/asm-generic/int-l64.h
53968 --- linux-2.6.32.42/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
53969 +++ linux-2.6.32.42/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
53970 @@ -46,6 +46,8 @@ typedef unsigned int u32;
53971 typedef signed long s64;
53972 typedef unsigned long u64;
53973
53974 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
53975 +
53976 #define S8_C(x) x
53977 #define U8_C(x) x ## U
53978 #define S16_C(x) x
53979 diff -urNp linux-2.6.32.42/include/asm-generic/int-ll64.h linux-2.6.32.42/include/asm-generic/int-ll64.h
53980 --- linux-2.6.32.42/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
53981 +++ linux-2.6.32.42/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
53982 @@ -51,6 +51,8 @@ typedef unsigned int u32;
53983 typedef signed long long s64;
53984 typedef unsigned long long u64;
53985
53986 +typedef unsigned long long intoverflow_t;
53987 +
53988 #define S8_C(x) x
53989 #define U8_C(x) x ## U
53990 #define S16_C(x) x
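
intoverflow_t is simply an integer type wide enough to hold the product of two unsigned longs, so a size computation can be checked for overflow before it reaches an allocator. Elsewhere in this patch the allocation wrappers use it along these lines; the function below is a sketch, not the exact hunk.

/* Illustrative only: overflow-checked size computation using the
 * intoverflow_t type added above (128-bit on 64-bit, 64-bit on 32-bit). */
#include <linux/kernel.h>
#include <linux/slab.h>

static void *example_alloc_array(size_t n, size_t size)
{
	if ((intoverflow_t)n * size > ULONG_MAX)
		return NULL;	/* product would overflow unsigned long */
	return kmalloc(n * size, GFP_KERNEL);
}
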
53991 diff -urNp linux-2.6.32.42/include/asm-generic/kmap_types.h linux-2.6.32.42/include/asm-generic/kmap_types.h
53992 --- linux-2.6.32.42/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
53993 +++ linux-2.6.32.42/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
53994 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
53995 KMAP_D(16) KM_IRQ_PTE,
53996 KMAP_D(17) KM_NMI,
53997 KMAP_D(18) KM_NMI_PTE,
53998 -KMAP_D(19) KM_TYPE_NR
53999 +KMAP_D(19) KM_CLEARPAGE,
54000 +KMAP_D(20) KM_TYPE_NR
54001 };
54002
54003 #undef KMAP_D
54004 diff -urNp linux-2.6.32.42/include/asm-generic/pgtable.h linux-2.6.32.42/include/asm-generic/pgtable.h
54005 --- linux-2.6.32.42/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
54006 +++ linux-2.6.32.42/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
54007 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
54008 unsigned long size);
54009 #endif
54010
54011 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
54012 +static inline unsigned long pax_open_kernel(void) { return 0; }
54013 +#endif
54014 +
54015 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
54016 +static inline unsigned long pax_close_kernel(void) { return 0; }
54017 +#endif
54018 +
54019 #endif /* !__ASSEMBLY__ */
54020
54021 #endif /* _ASM_GENERIC_PGTABLE_H */
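
These stubs give every architecture a pax_open_kernel()/pax_close_kernel() pair: architectures that write-protect kernel data (KERNEXEC) override them, everywhere else they compile away. The bracketing pattern used by callers looks roughly like this; the variable is hypothetical.

/* Illustrative only: the usual pattern for writing to data that KERNEXEC
 * keeps read-only; a no-op on architectures that only have the stubs above. */
#include <asm/pgtable.h>

static int example_tunable;	/* imagine this sits in a write-protected section */

static void example_set_tunable(int val)
{
	pax_open_kernel();
	example_tunable = val;
	pax_close_kernel();
}
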
54022 diff -urNp linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h
54023 --- linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
54024 +++ linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
54025 @@ -1,14 +1,19 @@
54026 #ifndef _PGTABLE_NOPMD_H
54027 #define _PGTABLE_NOPMD_H
54028
54029 -#ifndef __ASSEMBLY__
54030 -
54031 #include <asm-generic/pgtable-nopud.h>
54032
54033 -struct mm_struct;
54034 -
54035 #define __PAGETABLE_PMD_FOLDED
54036
54037 +#define PMD_SHIFT PUD_SHIFT
54038 +#define PTRS_PER_PMD 1
54039 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
54040 +#define PMD_MASK (~(PMD_SIZE-1))
54041 +
54042 +#ifndef __ASSEMBLY__
54043 +
54044 +struct mm_struct;
54045 +
54046 /*
54047 * Having the pmd type consist of a pud gets the size right, and allows
54048 * us to conceptually access the pud entry that this pmd is folded into
54049 @@ -16,11 +21,6 @@ struct mm_struct;
54050 */
54051 typedef struct { pud_t pud; } pmd_t;
54052
54053 -#define PMD_SHIFT PUD_SHIFT
54054 -#define PTRS_PER_PMD 1
54055 -#define PMD_SIZE (1UL << PMD_SHIFT)
54056 -#define PMD_MASK (~(PMD_SIZE-1))
54057 -
54058 /*
54059 * The "pud_xxx()" functions here are trivial for a folded two-level
54060 * setup: the pmd is never bad, and a pmd always exists (as it's folded
54061 diff -urNp linux-2.6.32.42/include/asm-generic/pgtable-nopud.h linux-2.6.32.42/include/asm-generic/pgtable-nopud.h
54062 --- linux-2.6.32.42/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
54063 +++ linux-2.6.32.42/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
54064 @@ -1,10 +1,15 @@
54065 #ifndef _PGTABLE_NOPUD_H
54066 #define _PGTABLE_NOPUD_H
54067
54068 -#ifndef __ASSEMBLY__
54069 -
54070 #define __PAGETABLE_PUD_FOLDED
54071
54072 +#define PUD_SHIFT PGDIR_SHIFT
54073 +#define PTRS_PER_PUD 1
54074 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
54075 +#define PUD_MASK (~(PUD_SIZE-1))
54076 +
54077 +#ifndef __ASSEMBLY__
54078 +
54079 /*
54080 * Having the pud type consist of a pgd gets the size right, and allows
54081 * us to conceptually access the pgd entry that this pud is folded into
54082 @@ -12,11 +17,6 @@
54083 */
54084 typedef struct { pgd_t pgd; } pud_t;
54085
54086 -#define PUD_SHIFT PGDIR_SHIFT
54087 -#define PTRS_PER_PUD 1
54088 -#define PUD_SIZE (1UL << PUD_SHIFT)
54089 -#define PUD_MASK (~(PUD_SIZE-1))
54090 -
54091 /*
54092 * The "pgd_xxx()" functions here are trivial for a folded two-level
54093 * setup: the pud is never bad, and a pud always exists (as it's folded
54094 diff -urNp linux-2.6.32.42/include/asm-generic/vmlinux.lds.h linux-2.6.32.42/include/asm-generic/vmlinux.lds.h
54095 --- linux-2.6.32.42/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
54096 +++ linux-2.6.32.42/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
54097 @@ -199,6 +199,7 @@
54098 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
54099 VMLINUX_SYMBOL(__start_rodata) = .; \
54100 *(.rodata) *(.rodata.*) \
54101 + *(.data.read_only) \
54102 *(__vermagic) /* Kernel version magic */ \
54103 *(__markers_strings) /* Markers: strings */ \
54104 *(__tracepoints_strings)/* Tracepoints: strings */ \
54105 @@ -656,22 +657,24 @@
54106 * section in the linker script will go there too. @phdr should have
54107 * a leading colon.
54108 *
54109 - * Note that this macros defines __per_cpu_load as an absolute symbol.
54110 + * Note that this macros defines per_cpu_load as an absolute symbol.
54111 * If there is no need to put the percpu section at a predetermined
54112 * address, use PERCPU().
54113 */
54114 #define PERCPU_VADDR(vaddr, phdr) \
54115 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
54116 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
54117 + per_cpu_load = .; \
54118 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
54119 - LOAD_OFFSET) { \
54120 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
54121 VMLINUX_SYMBOL(__per_cpu_start) = .; \
54122 *(.data.percpu.first) \
54123 - *(.data.percpu.page_aligned) \
54124 *(.data.percpu) \
54125 + . = ALIGN(PAGE_SIZE); \
54126 + *(.data.percpu.page_aligned) \
54127 *(.data.percpu.shared_aligned) \
54128 VMLINUX_SYMBOL(__per_cpu_end) = .; \
54129 } phdr \
54130 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
54131 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
54132
54133 /**
54134 * PERCPU - define output section for percpu area, simple version
54135 diff -urNp linux-2.6.32.42/include/drm/drmP.h linux-2.6.32.42/include/drm/drmP.h
54136 --- linux-2.6.32.42/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
54137 +++ linux-2.6.32.42/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
54138 @@ -71,6 +71,7 @@
54139 #include <linux/workqueue.h>
54140 #include <linux/poll.h>
54141 #include <asm/pgalloc.h>
54142 +#include <asm/local.h>
54143 #include "drm.h"
54144
54145 #include <linux/idr.h>
54146 @@ -814,7 +815,7 @@ struct drm_driver {
54147 void (*vgaarb_irq)(struct drm_device *dev, bool state);
54148
54149 /* Driver private ops for this object */
54150 - struct vm_operations_struct *gem_vm_ops;
54151 + const struct vm_operations_struct *gem_vm_ops;
54152
54153 int major;
54154 int minor;
54155 @@ -917,7 +918,7 @@ struct drm_device {
54156
54157 /** \name Usage Counters */
54158 /*@{ */
54159 - int open_count; /**< Outstanding files open */
54160 + local_t open_count; /**< Outstanding files open */
54161 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
54162 atomic_t vma_count; /**< Outstanding vma areas open */
54163 int buf_use; /**< Buffers in use -- cannot alloc */
54164 @@ -928,7 +929,7 @@ struct drm_device {
54165 /*@{ */
54166 unsigned long counters;
54167 enum drm_stat_type types[15];
54168 - atomic_t counts[15];
54169 + atomic_unchecked_t counts[15];
54170 /*@} */
54171
54172 struct list_head filelist;
54173 @@ -1016,7 +1017,7 @@ struct drm_device {
54174 struct pci_controller *hose;
54175 #endif
54176 struct drm_sg_mem *sg; /**< Scatter gather memory */
54177 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
54178 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
54179 void *dev_private; /**< device private data */
54180 void *mm_private;
54181 struct address_space *dev_mapping;
54182 @@ -1042,11 +1043,11 @@ struct drm_device {
54183 spinlock_t object_name_lock;
54184 struct idr object_name_idr;
54185 atomic_t object_count;
54186 - atomic_t object_memory;
54187 + atomic_unchecked_t object_memory;
54188 atomic_t pin_count;
54189 - atomic_t pin_memory;
54190 + atomic_unchecked_t pin_memory;
54191 atomic_t gtt_count;
54192 - atomic_t gtt_memory;
54193 + atomic_unchecked_t gtt_memory;
54194 uint32_t gtt_total;
54195 uint32_t invalidate_domains; /* domains pending invalidation */
54196 uint32_t flush_domains; /* domains pending flush */
54197 diff -urNp linux-2.6.32.42/include/linux/a.out.h linux-2.6.32.42/include/linux/a.out.h
54198 --- linux-2.6.32.42/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
54199 +++ linux-2.6.32.42/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
54200 @@ -39,6 +39,14 @@ enum machine_type {
54201 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
54202 };
54203
54204 +/* Constants for the N_FLAGS field */
54205 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54206 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
54207 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
54208 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
54209 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54210 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54211 +
54212 #if !defined (N_MAGIC)
54213 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
54214 #endif
54215 diff -urNp linux-2.6.32.42/include/linux/atmdev.h linux-2.6.32.42/include/linux/atmdev.h
54216 --- linux-2.6.32.42/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
54217 +++ linux-2.6.32.42/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
54218 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
54219 #endif
54220
54221 struct k_atm_aal_stats {
54222 -#define __HANDLE_ITEM(i) atomic_t i
54223 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54224 __AAL_STAT_ITEMS
54225 #undef __HANDLE_ITEM
54226 };
54227 diff -urNp linux-2.6.32.42/include/linux/backlight.h linux-2.6.32.42/include/linux/backlight.h
54228 --- linux-2.6.32.42/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
54229 +++ linux-2.6.32.42/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
54230 @@ -36,18 +36,18 @@ struct backlight_device;
54231 struct fb_info;
54232
54233 struct backlight_ops {
54234 - unsigned int options;
54235 + const unsigned int options;
54236
54237 #define BL_CORE_SUSPENDRESUME (1 << 0)
54238
54239 /* Notify the backlight driver some property has changed */
54240 - int (*update_status)(struct backlight_device *);
54241 + int (* const update_status)(struct backlight_device *);
54242 /* Return the current backlight brightness (accounting for power,
54243 fb_blank etc.) */
54244 - int (*get_brightness)(struct backlight_device *);
54245 + int (* const get_brightness)(struct backlight_device *);
54246 /* Check if given framebuffer device is the one bound to this backlight;
54247 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
54248 - int (*check_fb)(struct fb_info *);
54249 + int (* const check_fb)(struct fb_info *);
54250 };
54251
54252 /* This structure defines all the properties of a backlight */
54253 @@ -86,7 +86,7 @@ struct backlight_device {
54254 registered this device has been unloaded, and if class_get_devdata()
54255 points to something in the body of that driver, it is also invalid. */
54256 struct mutex ops_lock;
54257 - struct backlight_ops *ops;
54258 + const struct backlight_ops *ops;
54259
54260 /* The framebuffer notifier block */
54261 struct notifier_block fb_notif;
54262 @@ -103,7 +103,7 @@ static inline void backlight_update_stat
54263 }
54264
54265 extern struct backlight_device *backlight_device_register(const char *name,
54266 - struct device *dev, void *devdata, struct backlight_ops *ops);
54267 + struct device *dev, void *devdata, const struct backlight_ops *ops);
54268 extern void backlight_device_unregister(struct backlight_device *bd);
54269 extern void backlight_force_update(struct backlight_device *bd,
54270 enum backlight_update_reason reason);
54271 diff -urNp linux-2.6.32.42/include/linux/binfmts.h linux-2.6.32.42/include/linux/binfmts.h
54272 --- linux-2.6.32.42/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
54273 +++ linux-2.6.32.42/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
54274 @@ -83,6 +83,7 @@ struct linux_binfmt {
54275 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
54276 int (*load_shlib)(struct file *);
54277 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
54278 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
54279 unsigned long min_coredump; /* minimal dump size */
54280 int hasvdso;
54281 };
54282 diff -urNp linux-2.6.32.42/include/linux/blkdev.h linux-2.6.32.42/include/linux/blkdev.h
54283 --- linux-2.6.32.42/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
54284 +++ linux-2.6.32.42/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
54285 @@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
54286 #endif /* CONFIG_BLK_DEV_INTEGRITY */
54287
54288 struct block_device_operations {
54289 - int (*open) (struct block_device *, fmode_t);
54290 - int (*release) (struct gendisk *, fmode_t);
54291 - int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54292 - int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54293 - int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54294 - int (*direct_access) (struct block_device *, sector_t,
54295 + int (* const open) (struct block_device *, fmode_t);
54296 + int (* const release) (struct gendisk *, fmode_t);
54297 + int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54298 + int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54299 + int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54300 + int (* const direct_access) (struct block_device *, sector_t,
54301 void **, unsigned long *);
54302 - int (*media_changed) (struct gendisk *);
54303 - unsigned long long (*set_capacity) (struct gendisk *,
54304 + int (* const media_changed) (struct gendisk *);
54305 + unsigned long long (* const set_capacity) (struct gendisk *,
54306 unsigned long long);
54307 - int (*revalidate_disk) (struct gendisk *);
54308 - int (*getgeo)(struct block_device *, struct hd_geometry *);
54309 - struct module *owner;
54310 + int (* const revalidate_disk) (struct gendisk *);
54311 + int (*const getgeo)(struct block_device *, struct hd_geometry *);
54312 + struct module * const owner;
54313 };
54314
54315 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
54316 diff -urNp linux-2.6.32.42/include/linux/blktrace_api.h linux-2.6.32.42/include/linux/blktrace_api.h
54317 --- linux-2.6.32.42/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
54318 +++ linux-2.6.32.42/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
54319 @@ -160,7 +160,7 @@ struct blk_trace {
54320 struct dentry *dir;
54321 struct dentry *dropped_file;
54322 struct dentry *msg_file;
54323 - atomic_t dropped;
54324 + atomic_unchecked_t dropped;
54325 };
54326
54327 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
54328 diff -urNp linux-2.6.32.42/include/linux/byteorder/little_endian.h linux-2.6.32.42/include/linux/byteorder/little_endian.h
54329 --- linux-2.6.32.42/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
54330 +++ linux-2.6.32.42/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
54331 @@ -42,51 +42,51 @@
54332
54333 static inline __le64 __cpu_to_le64p(const __u64 *p)
54334 {
54335 - return (__force __le64)*p;
54336 + return (__force const __le64)*p;
54337 }
54338 static inline __u64 __le64_to_cpup(const __le64 *p)
54339 {
54340 - return (__force __u64)*p;
54341 + return (__force const __u64)*p;
54342 }
54343 static inline __le32 __cpu_to_le32p(const __u32 *p)
54344 {
54345 - return (__force __le32)*p;
54346 + return (__force const __le32)*p;
54347 }
54348 static inline __u32 __le32_to_cpup(const __le32 *p)
54349 {
54350 - return (__force __u32)*p;
54351 + return (__force const __u32)*p;
54352 }
54353 static inline __le16 __cpu_to_le16p(const __u16 *p)
54354 {
54355 - return (__force __le16)*p;
54356 + return (__force const __le16)*p;
54357 }
54358 static inline __u16 __le16_to_cpup(const __le16 *p)
54359 {
54360 - return (__force __u16)*p;
54361 + return (__force const __u16)*p;
54362 }
54363 static inline __be64 __cpu_to_be64p(const __u64 *p)
54364 {
54365 - return (__force __be64)__swab64p(p);
54366 + return (__force const __be64)__swab64p(p);
54367 }
54368 static inline __u64 __be64_to_cpup(const __be64 *p)
54369 {
54370 - return __swab64p((__u64 *)p);
54371 + return __swab64p((const __u64 *)p);
54372 }
54373 static inline __be32 __cpu_to_be32p(const __u32 *p)
54374 {
54375 - return (__force __be32)__swab32p(p);
54376 + return (__force const __be32)__swab32p(p);
54377 }
54378 static inline __u32 __be32_to_cpup(const __be32 *p)
54379 {
54380 - return __swab32p((__u32 *)p);
54381 + return __swab32p((const __u32 *)p);
54382 }
54383 static inline __be16 __cpu_to_be16p(const __u16 *p)
54384 {
54385 - return (__force __be16)__swab16p(p);
54386 + return (__force const __be16)__swab16p(p);
54387 }
54388 static inline __u16 __be16_to_cpup(const __be16 *p)
54389 {
54390 - return __swab16p((__u16 *)p);
54391 + return __swab16p((const __u16 *)p);
54392 }
54393 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
54394 #define __le64_to_cpus(x) do { (void)(x); } while (0)
54395 diff -urNp linux-2.6.32.42/include/linux/cache.h linux-2.6.32.42/include/linux/cache.h
54396 --- linux-2.6.32.42/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
54397 +++ linux-2.6.32.42/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
54398 @@ -16,6 +16,10 @@
54399 #define __read_mostly
54400 #endif
54401
54402 +#ifndef __read_only
54403 +#define __read_only __read_mostly
54404 +#endif
54405 +
54406 #ifndef ____cacheline_aligned
54407 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
54408 #endif
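
The __read_only fallback above simply aliases __read_mostly; architectures that take advantage of it place such variables in the .data.read_only section that the vmlinux.lds.h hunk earlier folds into .rodata. A hedged sketch of the intended usage, with a made-up boot parameter:

/* Illustrative only: a variable written once during early boot and never
 * modified afterwards; with KERNEXEC it ends up write-protected, with the
 * generic fallback above it merely shares cache lines with other
 * read-mostly data. */
#include <linux/cache.h>
#include <linux/init.h>

static int example_feature_enabled __read_only;

static int __init example_setup(char *str)
{
	example_feature_enabled = 1;	/* written once, during early boot */
	return 1;
}
__setup("example_feature", example_setup);
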
54409 diff -urNp linux-2.6.32.42/include/linux/capability.h linux-2.6.32.42/include/linux/capability.h
54410 --- linux-2.6.32.42/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
54411 +++ linux-2.6.32.42/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
54412 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
54413 (security_real_capable_noaudit((t), (cap)) == 0)
54414
54415 extern int capable(int cap);
54416 +int capable_nolog(int cap);
54417
54418 /* audit system wants to get cap info from files as well */
54419 struct dentry;
54420 diff -urNp linux-2.6.32.42/include/linux/compiler-gcc4.h linux-2.6.32.42/include/linux/compiler-gcc4.h
54421 --- linux-2.6.32.42/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
54422 +++ linux-2.6.32.42/include/linux/compiler-gcc4.h 2011-04-17 15:56:46.000000000 -0400
54423 @@ -36,4 +36,8 @@
54424 the kernel context */
54425 #define __cold __attribute__((__cold__))
54426
54427 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
54428 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
54429 +#define __bos0(ptr) __bos((ptr), 0)
54430 +#define __bos1(ptr) __bos((ptr), 1)
54431 #endif
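
alloc_size() tells gcc which parameter carries the allocation size, and __builtin_object_size() lets callers recover that size wherever the compiler can see the allocation, which is what the __bos helpers above wrap. A hedged userland illustration with a made-up allocator showing how the two cooperate:

/* Illustrative only: with alloc_size(1), gcc can compute the object size
 * of example_alloc()'s result where it sees the call (with optimization);
 * __bos0() then returns that size, or (size_t)-1 when it is unknown, and
 * oversized copies can be rejected. */
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#define __alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
#define __bos(ptr, arg)   __builtin_object_size((ptr), (arg))
#define __bos0(ptr)       __bos((ptr), 0)

static void *example_alloc(size_t n) __alloc_size(1);
static void *example_alloc(size_t n) { return malloc(n); }

static void example_copy(void *dst, const void *src, size_t n)
{
	if (__bos0(dst) != (size_t)-1 && n > __bos0(dst))
		abort();	/* copy larger than the destination object */
	memcpy(dst, src, n);
}
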
54432 diff -urNp linux-2.6.32.42/include/linux/compiler.h linux-2.6.32.42/include/linux/compiler.h
54433 --- linux-2.6.32.42/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
54434 +++ linux-2.6.32.42/include/linux/compiler.h 2011-04-17 15:56:46.000000000 -0400
54435 @@ -256,6 +256,22 @@ void ftrace_likely_update(struct ftrace_
54436 #define __cold
54437 #endif
54438
54439 +#ifndef __alloc_size
54440 +#define __alloc_size
54441 +#endif
54442 +
54443 +#ifndef __bos
54444 +#define __bos
54445 +#endif
54446 +
54447 +#ifndef __bos0
54448 +#define __bos0
54449 +#endif
54450 +
54451 +#ifndef __bos1
54452 +#define __bos1
54453 +#endif
54454 +
54455 /* Simple shorthand for a section definition */
54456 #ifndef __section
54457 # define __section(S) __attribute__ ((__section__(#S)))
54458 @@ -278,6 +294,7 @@ void ftrace_likely_update(struct ftrace_
54459 * use is to mediate communication between process-level code and irq/NMI
54460 * handlers, all running on the same CPU.
54461 */
54462 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
54463 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
54464 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
54465
54466 #endif /* __LINUX_COMPILER_H */
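
Constifying ACCESS_ONCE() makes it a read-only accessor, so any code that used it to write a variable no longer compiles and must be converted to the new ACCESS_ONCE_RW(). A short hedged sketch of the distinction, with the macros repeated locally so the snippet stands alone:

#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

static int example_flag;

static int example_poll(void)
{
	return ACCESS_ONCE(example_flag);	/* forced single read */
}

static void example_set(void)
{
	ACCESS_ONCE_RW(example_flag) = 1;	/* writes need the _RW form */
	/* ACCESS_ONCE(example_flag) = 1;	   would fail: assignment to const */
}
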
54467 diff -urNp linux-2.6.32.42/include/linux/dcache.h linux-2.6.32.42/include/linux/dcache.h
54468 --- linux-2.6.32.42/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
54469 +++ linux-2.6.32.42/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
54470 @@ -119,6 +119,8 @@ struct dentry {
54471 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
54472 };
54473
54474 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
54475 +
54476 /*
54477 * dentry->d_lock spinlock nesting subclasses:
54478 *
54479 diff -urNp linux-2.6.32.42/include/linux/decompress/mm.h linux-2.6.32.42/include/linux/decompress/mm.h
54480 --- linux-2.6.32.42/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
54481 +++ linux-2.6.32.42/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
54482 @@ -78,7 +78,7 @@ static void free(void *where)
54483 * warnings when not needed (indeed large_malloc / large_free are not
54484 * needed by inflate */
54485
54486 -#define malloc(a) kmalloc(a, GFP_KERNEL)
54487 +#define malloc(a) kmalloc((a), GFP_KERNEL)
54488 #define free(a) kfree(a)
54489
54490 #define large_malloc(a) vmalloc(a)
54491 diff -urNp linux-2.6.32.42/include/linux/dma-mapping.h linux-2.6.32.42/include/linux/dma-mapping.h
54492 --- linux-2.6.32.42/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
54493 +++ linux-2.6.32.42/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
54494 @@ -16,50 +16,50 @@ enum dma_data_direction {
54495 };
54496
54497 struct dma_map_ops {
54498 - void* (*alloc_coherent)(struct device *dev, size_t size,
54499 + void* (* const alloc_coherent)(struct device *dev, size_t size,
54500 dma_addr_t *dma_handle, gfp_t gfp);
54501 - void (*free_coherent)(struct device *dev, size_t size,
54502 + void (* const free_coherent)(struct device *dev, size_t size,
54503 void *vaddr, dma_addr_t dma_handle);
54504 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
54505 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
54506 unsigned long offset, size_t size,
54507 enum dma_data_direction dir,
54508 struct dma_attrs *attrs);
54509 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
54510 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
54511 size_t size, enum dma_data_direction dir,
54512 struct dma_attrs *attrs);
54513 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
54514 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
54515 int nents, enum dma_data_direction dir,
54516 struct dma_attrs *attrs);
54517 - void (*unmap_sg)(struct device *dev,
54518 + void (* const unmap_sg)(struct device *dev,
54519 struct scatterlist *sg, int nents,
54520 enum dma_data_direction dir,
54521 struct dma_attrs *attrs);
54522 - void (*sync_single_for_cpu)(struct device *dev,
54523 + void (* const sync_single_for_cpu)(struct device *dev,
54524 dma_addr_t dma_handle, size_t size,
54525 enum dma_data_direction dir);
54526 - void (*sync_single_for_device)(struct device *dev,
54527 + void (* const sync_single_for_device)(struct device *dev,
54528 dma_addr_t dma_handle, size_t size,
54529 enum dma_data_direction dir);
54530 - void (*sync_single_range_for_cpu)(struct device *dev,
54531 + void (* const sync_single_range_for_cpu)(struct device *dev,
54532 dma_addr_t dma_handle,
54533 unsigned long offset,
54534 size_t size,
54535 enum dma_data_direction dir);
54536 - void (*sync_single_range_for_device)(struct device *dev,
54537 + void (* const sync_single_range_for_device)(struct device *dev,
54538 dma_addr_t dma_handle,
54539 unsigned long offset,
54540 size_t size,
54541 enum dma_data_direction dir);
54542 - void (*sync_sg_for_cpu)(struct device *dev,
54543 + void (* const sync_sg_for_cpu)(struct device *dev,
54544 struct scatterlist *sg, int nents,
54545 enum dma_data_direction dir);
54546 - void (*sync_sg_for_device)(struct device *dev,
54547 + void (* const sync_sg_for_device)(struct device *dev,
54548 struct scatterlist *sg, int nents,
54549 enum dma_data_direction dir);
54550 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
54551 - int (*dma_supported)(struct device *dev, u64 mask);
54552 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
54553 + int (* const dma_supported)(struct device *dev, u64 mask);
54554 int (*set_dma_mask)(struct device *dev, u64 mask);
54555 - int is_phys;
54556 + const int is_phys;
54557 };
54558
54559 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
54560 diff -urNp linux-2.6.32.42/include/linux/dst.h linux-2.6.32.42/include/linux/dst.h
54561 --- linux-2.6.32.42/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
54562 +++ linux-2.6.32.42/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
54563 @@ -380,7 +380,7 @@ struct dst_node
54564 struct thread_pool *pool;
54565
54566 /* Transaction IDs live here */
54567 - atomic_long_t gen;
54568 + atomic_long_unchecked_t gen;
54569
54570 /*
54571 * How frequently and how many times transaction
54572 diff -urNp linux-2.6.32.42/include/linux/elf.h linux-2.6.32.42/include/linux/elf.h
54573 --- linux-2.6.32.42/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
54574 +++ linux-2.6.32.42/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
54575 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
54576 #define PT_GNU_EH_FRAME 0x6474e550
54577
54578 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
54579 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
54580 +
54581 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
54582 +
54583 +/* Constants for the e_flags field */
54584 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54585 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
54586 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
54587 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
54588 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54589 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54590
54591 /* These constants define the different elf file types */
54592 #define ET_NONE 0
54593 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
54594 #define DT_DEBUG 21
54595 #define DT_TEXTREL 22
54596 #define DT_JMPREL 23
54597 +#define DT_FLAGS 30
54598 + #define DF_TEXTREL 0x00000004
54599 #define DT_ENCODING 32
54600 #define OLD_DT_LOOS 0x60000000
54601 #define DT_LOOS 0x6000000d
54602 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
54603 #define PF_W 0x2
54604 #define PF_X 0x1
54605
54606 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
54607 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
54608 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
54609 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
54610 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
54611 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
54612 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
54613 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
54614 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
54615 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
54616 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
54617 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
54618 +
54619 typedef struct elf32_phdr{
54620 Elf32_Word p_type;
54621 Elf32_Off p_offset;
54622 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
54623 #define EI_OSABI 7
54624 #define EI_PAD 8
54625
54626 +#define EI_PAX 14
54627 +
54628 #define ELFMAG0 0x7f /* EI_MAG */
54629 #define ELFMAG1 'E'
54630 #define ELFMAG2 'L'
54631 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
54632 #define elf_phdr elf32_phdr
54633 #define elf_note elf32_note
54634 #define elf_addr_t Elf32_Off
54635 +#define elf_dyn Elf32_Dyn
54636
54637 #else
54638
54639 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
54640 #define elf_phdr elf64_phdr
54641 #define elf_note elf64_note
54642 #define elf_addr_t Elf64_Off
54643 +#define elf_dyn Elf64_Dyn
54644
54645 #endif
54646
54647 diff -urNp linux-2.6.32.42/include/linux/fscache-cache.h linux-2.6.32.42/include/linux/fscache-cache.h
54648 --- linux-2.6.32.42/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
54649 +++ linux-2.6.32.42/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
54650 @@ -116,7 +116,7 @@ struct fscache_operation {
54651 #endif
54652 };
54653
54654 -extern atomic_t fscache_op_debug_id;
54655 +extern atomic_unchecked_t fscache_op_debug_id;
54656 extern const struct slow_work_ops fscache_op_slow_work_ops;
54657
54658 extern void fscache_enqueue_operation(struct fscache_operation *);
54659 @@ -134,7 +134,7 @@ static inline void fscache_operation_ini
54660 fscache_operation_release_t release)
54661 {
54662 atomic_set(&op->usage, 1);
54663 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
54664 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
54665 op->release = release;
54666 INIT_LIST_HEAD(&op->pend_link);
54667 fscache_set_op_state(op, "Init");
54668 diff -urNp linux-2.6.32.42/include/linux/fs.h linux-2.6.32.42/include/linux/fs.h
54669 --- linux-2.6.32.42/include/linux/fs.h 2011-03-27 14:31:47.000000000 -0400
54670 +++ linux-2.6.32.42/include/linux/fs.h 2011-04-17 15:56:46.000000000 -0400
54671 @@ -90,6 +90,11 @@ struct inodes_stat_t {
54672 /* Expect random access pattern */
54673 #define FMODE_RANDOM ((__force fmode_t)4096)
54674
54675 +/* Hack for grsec so as not to require read permission simply to execute
54676 + * a binary
54677 + */
54678 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
54679 +
54680 /*
54681 * The below are the various read and write types that we support. Some of
54682 * them include behavioral modifiers that send information down to the
54683 @@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
54684 unsigned long, unsigned long);
54685
54686 struct address_space_operations {
54687 - int (*writepage)(struct page *page, struct writeback_control *wbc);
54688 - int (*readpage)(struct file *, struct page *);
54689 - void (*sync_page)(struct page *);
54690 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
54691 + int (* const readpage)(struct file *, struct page *);
54692 + void (* const sync_page)(struct page *);
54693
54694 /* Write back some dirty pages from this mapping. */
54695 - int (*writepages)(struct address_space *, struct writeback_control *);
54696 + int (* const writepages)(struct address_space *, struct writeback_control *);
54697
54698 /* Set a page dirty. Return true if this dirtied it */
54699 - int (*set_page_dirty)(struct page *page);
54700 + int (* const set_page_dirty)(struct page *page);
54701
54702 - int (*readpages)(struct file *filp, struct address_space *mapping,
54703 + int (* const readpages)(struct file *filp, struct address_space *mapping,
54704 struct list_head *pages, unsigned nr_pages);
54705
54706 - int (*write_begin)(struct file *, struct address_space *mapping,
54707 + int (* const write_begin)(struct file *, struct address_space *mapping,
54708 loff_t pos, unsigned len, unsigned flags,
54709 struct page **pagep, void **fsdata);
54710 - int (*write_end)(struct file *, struct address_space *mapping,
54711 + int (* const write_end)(struct file *, struct address_space *mapping,
54712 loff_t pos, unsigned len, unsigned copied,
54713 struct page *page, void *fsdata);
54714
54715 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
54716 - sector_t (*bmap)(struct address_space *, sector_t);
54717 - void (*invalidatepage) (struct page *, unsigned long);
54718 - int (*releasepage) (struct page *, gfp_t);
54719 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
54720 + sector_t (* const bmap)(struct address_space *, sector_t);
54721 + void (* const invalidatepage) (struct page *, unsigned long);
54722 + int (* const releasepage) (struct page *, gfp_t);
54723 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
54724 loff_t offset, unsigned long nr_segs);
54725 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
54726 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
54727 void **, unsigned long *);
54728 /* migrate the contents of a page to the specified target */
54729 - int (*migratepage) (struct address_space *,
54730 + int (* const migratepage) (struct address_space *,
54731 struct page *, struct page *);
54732 - int (*launder_page) (struct page *);
54733 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
54734 + int (* const launder_page) (struct page *);
54735 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
54736 unsigned long);
54737 - int (*error_remove_page)(struct address_space *, struct page *);
54738 + int (* const error_remove_page)(struct address_space *, struct page *);
54739 };
54740
54741 /*
54742 @@ -1030,19 +1035,19 @@ static inline int file_check_writeable(s
54743 typedef struct files_struct *fl_owner_t;
54744
54745 struct file_lock_operations {
54746 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
54747 - void (*fl_release_private)(struct file_lock *);
54748 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
54749 + void (* const fl_release_private)(struct file_lock *);
54750 };
54751
54752 struct lock_manager_operations {
54753 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
54754 - void (*fl_notify)(struct file_lock *); /* unblock callback */
54755 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
54756 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
54757 - void (*fl_release_private)(struct file_lock *);
54758 - void (*fl_break)(struct file_lock *);
54759 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
54760 - int (*fl_change)(struct file_lock **, int);
54761 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
54762 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
54763 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
54764 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
54765 + void (* const fl_release_private)(struct file_lock *);
54766 + void (* const fl_break)(struct file_lock *);
54767 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
54768 + int (* const fl_change)(struct file_lock **, int);
54769 };
54770
54771 struct lock_manager {
54772 @@ -1441,7 +1446,7 @@ struct fiemap_extent_info {
54773 unsigned int fi_flags; /* Flags as passed from user */
54774 unsigned int fi_extents_mapped; /* Number of mapped extents */
54775 unsigned int fi_extents_max; /* Size of fiemap_extent array */
54776 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
54777 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
54778 * array */
54779 };
54780 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
54781 @@ -1558,30 +1563,30 @@ extern ssize_t vfs_writev(struct file *,
54782 unsigned long, loff_t *);
54783
54784 struct super_operations {
54785 - struct inode *(*alloc_inode)(struct super_block *sb);
54786 - void (*destroy_inode)(struct inode *);
54787 + struct inode *(* const alloc_inode)(struct super_block *sb);
54788 + void (* const destroy_inode)(struct inode *);
54789
54790 - void (*dirty_inode) (struct inode *);
54791 - int (*write_inode) (struct inode *, int);
54792 - void (*drop_inode) (struct inode *);
54793 - void (*delete_inode) (struct inode *);
54794 - void (*put_super) (struct super_block *);
54795 - void (*write_super) (struct super_block *);
54796 - int (*sync_fs)(struct super_block *sb, int wait);
54797 - int (*freeze_fs) (struct super_block *);
54798 - int (*unfreeze_fs) (struct super_block *);
54799 - int (*statfs) (struct dentry *, struct kstatfs *);
54800 - int (*remount_fs) (struct super_block *, int *, char *);
54801 - void (*clear_inode) (struct inode *);
54802 - void (*umount_begin) (struct super_block *);
54803 + void (* const dirty_inode) (struct inode *);
54804 + int (* const write_inode) (struct inode *, int);
54805 + void (* const drop_inode) (struct inode *);
54806 + void (* const delete_inode) (struct inode *);
54807 + void (* const put_super) (struct super_block *);
54808 + void (* const write_super) (struct super_block *);
54809 + int (* const sync_fs)(struct super_block *sb, int wait);
54810 + int (* const freeze_fs) (struct super_block *);
54811 + int (* const unfreeze_fs) (struct super_block *);
54812 + int (* const statfs) (struct dentry *, struct kstatfs *);
54813 + int (* const remount_fs) (struct super_block *, int *, char *);
54814 + void (* const clear_inode) (struct inode *);
54815 + void (* const umount_begin) (struct super_block *);
54816
54817 - int (*show_options)(struct seq_file *, struct vfsmount *);
54818 - int (*show_stats)(struct seq_file *, struct vfsmount *);
54819 + int (* const show_options)(struct seq_file *, struct vfsmount *);
54820 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
54821 #ifdef CONFIG_QUOTA
54822 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
54823 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
54824 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
54825 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
54826 #endif
54827 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
54828 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
54829 };
54830
54831 /*
54832 diff -urNp linux-2.6.32.42/include/linux/fs_struct.h linux-2.6.32.42/include/linux/fs_struct.h
54833 --- linux-2.6.32.42/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
54834 +++ linux-2.6.32.42/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
54835 @@ -4,7 +4,7 @@
54836 #include <linux/path.h>
54837
54838 struct fs_struct {
54839 - int users;
54840 + atomic_t users;
54841 rwlock_t lock;
54842 int umask;
54843 int in_exec;
54844 diff -urNp linux-2.6.32.42/include/linux/ftrace_event.h linux-2.6.32.42/include/linux/ftrace_event.h
54845 --- linux-2.6.32.42/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
54846 +++ linux-2.6.32.42/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
54847 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
54848 int filter_type);
54849 extern int trace_define_common_fields(struct ftrace_event_call *call);
54850
54851 -#define is_signed_type(type) (((type)(-1)) < 0)
54852 +#define is_signed_type(type) (((type)(-1)) < (type)1)
54853
54854 int trace_set_clr_event(const char *system, const char *event, int set);
54855
54856 diff -urNp linux-2.6.32.42/include/linux/genhd.h linux-2.6.32.42/include/linux/genhd.h
54857 --- linux-2.6.32.42/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
54858 +++ linux-2.6.32.42/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
54859 @@ -161,7 +161,7 @@ struct gendisk {
54860
54861 struct timer_rand_state *random;
54862
54863 - atomic_t sync_io; /* RAID */
54864 + atomic_unchecked_t sync_io; /* RAID */
54865 struct work_struct async_notify;
54866 #ifdef CONFIG_BLK_DEV_INTEGRITY
54867 struct blk_integrity *integrity;
54868 diff -urNp linux-2.6.32.42/include/linux/gracl.h linux-2.6.32.42/include/linux/gracl.h
54869 --- linux-2.6.32.42/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
54870 +++ linux-2.6.32.42/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
54871 @@ -0,0 +1,317 @@
54872 +#ifndef GR_ACL_H
54873 +#define GR_ACL_H
54874 +
54875 +#include <linux/grdefs.h>
54876 +#include <linux/resource.h>
54877 +#include <linux/capability.h>
54878 +#include <linux/dcache.h>
54879 +#include <asm/resource.h>
54880 +
54881 +/* Major status information */
54882 +
54883 +#define GR_VERSION "grsecurity 2.2.2"
54884 +#define GRSECURITY_VERSION 0x2202
54885 +
54886 +enum {
54887 + GR_SHUTDOWN = 0,
54888 + GR_ENABLE = 1,
54889 + GR_SPROLE = 2,
54890 + GR_RELOAD = 3,
54891 + GR_SEGVMOD = 4,
54892 + GR_STATUS = 5,
54893 + GR_UNSPROLE = 6,
54894 + GR_PASSSET = 7,
54895 + GR_SPROLEPAM = 8,
54896 +};
54897 +
54898 +/* Password setup definitions
54899 + * kernel/grhash.c */
54900 +enum {
54901 + GR_PW_LEN = 128,
54902 + GR_SALT_LEN = 16,
54903 + GR_SHA_LEN = 32,
54904 +};
54905 +
54906 +enum {
54907 + GR_SPROLE_LEN = 64,
54908 +};
54909 +
54910 +enum {
54911 + GR_NO_GLOB = 0,
54912 + GR_REG_GLOB,
54913 + GR_CREATE_GLOB
54914 +};
54915 +
54916 +#define GR_NLIMITS 32
54917 +
54918 +/* Begin Data Structures */
54919 +
54920 +struct sprole_pw {
54921 + unsigned char *rolename;
54922 + unsigned char salt[GR_SALT_LEN];
54923 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
54924 +};
54925 +
54926 +struct name_entry {
54927 + __u32 key;
54928 + ino_t inode;
54929 + dev_t device;
54930 + char *name;
54931 + __u16 len;
54932 + __u8 deleted;
54933 + struct name_entry *prev;
54934 + struct name_entry *next;
54935 +};
54936 +
54937 +struct inodev_entry {
54938 + struct name_entry *nentry;
54939 + struct inodev_entry *prev;
54940 + struct inodev_entry *next;
54941 +};
54942 +
54943 +struct acl_role_db {
54944 + struct acl_role_label **r_hash;
54945 + __u32 r_size;
54946 +};
54947 +
54948 +struct inodev_db {
54949 + struct inodev_entry **i_hash;
54950 + __u32 i_size;
54951 +};
54952 +
54953 +struct name_db {
54954 + struct name_entry **n_hash;
54955 + __u32 n_size;
54956 +};
54957 +
54958 +struct crash_uid {
54959 + uid_t uid;
54960 + unsigned long expires;
54961 +};
54962 +
54963 +struct gr_hash_struct {
54964 + void **table;
54965 + void **nametable;
54966 + void *first;
54967 + __u32 table_size;
54968 + __u32 used_size;
54969 + int type;
54970 +};
54971 +
54972 +/* Userspace Grsecurity ACL data structures */
54973 +
54974 +struct acl_subject_label {
54975 + char *filename;
54976 + ino_t inode;
54977 + dev_t device;
54978 + __u32 mode;
54979 + kernel_cap_t cap_mask;
54980 + kernel_cap_t cap_lower;
54981 + kernel_cap_t cap_invert_audit;
54982 +
54983 + struct rlimit res[GR_NLIMITS];
54984 + __u32 resmask;
54985 +
54986 + __u8 user_trans_type;
54987 + __u8 group_trans_type;
54988 + uid_t *user_transitions;
54989 + gid_t *group_transitions;
54990 + __u16 user_trans_num;
54991 + __u16 group_trans_num;
54992 +
54993 + __u32 sock_families[2];
54994 + __u32 ip_proto[8];
54995 + __u32 ip_type;
54996 + struct acl_ip_label **ips;
54997 + __u32 ip_num;
54998 + __u32 inaddr_any_override;
54999 +
55000 + __u32 crashes;
55001 + unsigned long expires;
55002 +
55003 + struct acl_subject_label *parent_subject;
55004 + struct gr_hash_struct *hash;
55005 + struct acl_subject_label *prev;
55006 + struct acl_subject_label *next;
55007 +
55008 + struct acl_object_label **obj_hash;
55009 + __u32 obj_hash_size;
55010 + __u16 pax_flags;
55011 +};
55012 +
55013 +struct role_allowed_ip {
55014 + __u32 addr;
55015 + __u32 netmask;
55016 +
55017 + struct role_allowed_ip *prev;
55018 + struct role_allowed_ip *next;
55019 +};
55020 +
55021 +struct role_transition {
55022 + char *rolename;
55023 +
55024 + struct role_transition *prev;
55025 + struct role_transition *next;
55026 +};
55027 +
55028 +struct acl_role_label {
55029 + char *rolename;
55030 + uid_t uidgid;
55031 + __u16 roletype;
55032 +
55033 + __u16 auth_attempts;
55034 + unsigned long expires;
55035 +
55036 + struct acl_subject_label *root_label;
55037 + struct gr_hash_struct *hash;
55038 +
55039 + struct acl_role_label *prev;
55040 + struct acl_role_label *next;
55041 +
55042 + struct role_transition *transitions;
55043 + struct role_allowed_ip *allowed_ips;
55044 + uid_t *domain_children;
55045 + __u16 domain_child_num;
55046 +
55047 + struct acl_subject_label **subj_hash;
55048 + __u32 subj_hash_size;
55049 +};
55050 +
55051 +struct user_acl_role_db {
55052 + struct acl_role_label **r_table;
55053 + __u32 num_pointers; /* Number of allocations to track */
55054 + __u32 num_roles; /* Number of roles */
55055 + __u32 num_domain_children; /* Number of domain children */
55056 + __u32 num_subjects; /* Number of subjects */
55057 + __u32 num_objects; /* Number of objects */
55058 +};
55059 +
55060 +struct acl_object_label {
55061 + char *filename;
55062 + ino_t inode;
55063 + dev_t device;
55064 + __u32 mode;
55065 +
55066 + struct acl_subject_label *nested;
55067 + struct acl_object_label *globbed;
55068 +
55069 + /* next two structures not used */
55070 +
55071 + struct acl_object_label *prev;
55072 + struct acl_object_label *next;
55073 +};
55074 +
55075 +struct acl_ip_label {
55076 + char *iface;
55077 + __u32 addr;
55078 + __u32 netmask;
55079 + __u16 low, high;
55080 + __u8 mode;
55081 + __u32 type;
55082 + __u32 proto[8];
55083 +
55084 + /* next two structures not used */
55085 +
55086 + struct acl_ip_label *prev;
55087 + struct acl_ip_label *next;
55088 +};
55089 +
55090 +struct gr_arg {
55091 + struct user_acl_role_db role_db;
55092 + unsigned char pw[GR_PW_LEN];
55093 + unsigned char salt[GR_SALT_LEN];
55094 + unsigned char sum[GR_SHA_LEN];
55095 + unsigned char sp_role[GR_SPROLE_LEN];
55096 + struct sprole_pw *sprole_pws;
55097 + dev_t segv_device;
55098 + ino_t segv_inode;
55099 + uid_t segv_uid;
55100 + __u16 num_sprole_pws;
55101 + __u16 mode;
55102 +};
55103 +
55104 +struct gr_arg_wrapper {
55105 + struct gr_arg *arg;
55106 + __u32 version;
55107 + __u32 size;
55108 +};
55109 +
55110 +struct subject_map {
55111 + struct acl_subject_label *user;
55112 + struct acl_subject_label *kernel;
55113 + struct subject_map *prev;
55114 + struct subject_map *next;
55115 +};
55116 +
55117 +struct acl_subj_map_db {
55118 + struct subject_map **s_hash;
55119 + __u32 s_size;
55120 +};
55121 +
55122 +/* End Data Structures Section */
55123 +
55124 +/* Hash functions generated by empirical testing by Brad Spengler
55125 + Makes good use of the low bits of the inode. Generally 0-1 times
55126 + in loop for successful match. 0-3 for unsuccessful match.
55127 + Shift/add algorithm with modulus of table size and an XOR*/
55128 +
55129 +static __inline__ unsigned int
55130 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
55131 +{
55132 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
55133 +}
55134 +
55135 + static __inline__ unsigned int
55136 +shash(const struct acl_subject_label *userp, const unsigned int sz)
55137 +{
55138 + return ((const unsigned long)userp % sz);
55139 +}
55140 +
55141 +static __inline__ unsigned int
55142 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
55143 +{
55144 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
55145 +}
55146 +
55147 +static __inline__ unsigned int
55148 +nhash(const char *name, const __u16 len, const unsigned int sz)
55149 +{
55150 + return full_name_hash((const unsigned char *)name, len) % sz;
55151 +}
55152 +
55153 +#define FOR_EACH_ROLE_START(role) \
55154 + role = role_list; \
55155 + while (role) {
55156 +
55157 +#define FOR_EACH_ROLE_END(role) \
55158 + role = role->prev; \
55159 + }
55160 +
55161 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
55162 + subj = NULL; \
55163 + iter = 0; \
55164 + while (iter < role->subj_hash_size) { \
55165 + if (subj == NULL) \
55166 + subj = role->subj_hash[iter]; \
55167 + if (subj == NULL) { \
55168 + iter++; \
55169 + continue; \
55170 + }
55171 +
55172 +#define FOR_EACH_SUBJECT_END(subj,iter) \
55173 + subj = subj->next; \
55174 + if (subj == NULL) \
55175 + iter++; \
55176 + }
55177 +
55178 +
55179 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
55180 + subj = role->hash->first; \
55181 + while (subj != NULL) {
55182 +
55183 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
55184 + subj = subj->next; \
55185 + }
55186 +
55187 +#endif
55188 +
55189 diff -urNp linux-2.6.32.42/include/linux/gralloc.h linux-2.6.32.42/include/linux/gralloc.h
55190 --- linux-2.6.32.42/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
55191 +++ linux-2.6.32.42/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
55192 @@ -0,0 +1,9 @@
55193 +#ifndef __GRALLOC_H
55194 +#define __GRALLOC_H
55195 +
55196 +void acl_free_all(void);
55197 +int acl_alloc_stack_init(unsigned long size);
55198 +void *acl_alloc(unsigned long len);
55199 +void *acl_alloc_num(unsigned long num, unsigned long len);
55200 +
55201 +#endif
55202 diff -urNp linux-2.6.32.42/include/linux/grdefs.h linux-2.6.32.42/include/linux/grdefs.h
55203 --- linux-2.6.32.42/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
55204 +++ linux-2.6.32.42/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
55205 @@ -0,0 +1,140 @@
55206 +#ifndef GRDEFS_H
55207 +#define GRDEFS_H
55208 +
55209 +/* Begin grsecurity status declarations */
55210 +
55211 +enum {
55212 + GR_READY = 0x01,
55213 + GR_STATUS_INIT = 0x00 // disabled state
55214 +};
55215 +
55216 +/* Begin ACL declarations */
55217 +
55218 +/* Role flags */
55219 +
55220 +enum {
55221 + GR_ROLE_USER = 0x0001,
55222 + GR_ROLE_GROUP = 0x0002,
55223 + GR_ROLE_DEFAULT = 0x0004,
55224 + GR_ROLE_SPECIAL = 0x0008,
55225 + GR_ROLE_AUTH = 0x0010,
55226 + GR_ROLE_NOPW = 0x0020,
55227 + GR_ROLE_GOD = 0x0040,
55228 + GR_ROLE_LEARN = 0x0080,
55229 + GR_ROLE_TPE = 0x0100,
55230 + GR_ROLE_DOMAIN = 0x0200,
55231 + GR_ROLE_PAM = 0x0400,
55232 + GR_ROLE_PERSIST = 0x800
55233 +};
55234 +
55235 +/* ACL Subject and Object mode flags */
55236 +enum {
55237 + GR_DELETED = 0x80000000
55238 +};
55239 +
55240 +/* ACL Object-only mode flags */
55241 +enum {
55242 + GR_READ = 0x00000001,
55243 + GR_APPEND = 0x00000002,
55244 + GR_WRITE = 0x00000004,
55245 + GR_EXEC = 0x00000008,
55246 + GR_FIND = 0x00000010,
55247 + GR_INHERIT = 0x00000020,
55248 + GR_SETID = 0x00000040,
55249 + GR_CREATE = 0x00000080,
55250 + GR_DELETE = 0x00000100,
55251 + GR_LINK = 0x00000200,
55252 + GR_AUDIT_READ = 0x00000400,
55253 + GR_AUDIT_APPEND = 0x00000800,
55254 + GR_AUDIT_WRITE = 0x00001000,
55255 + GR_AUDIT_EXEC = 0x00002000,
55256 + GR_AUDIT_FIND = 0x00004000,
55257 + GR_AUDIT_INHERIT= 0x00008000,
55258 + GR_AUDIT_SETID = 0x00010000,
55259 + GR_AUDIT_CREATE = 0x00020000,
55260 + GR_AUDIT_DELETE = 0x00040000,
55261 + GR_AUDIT_LINK = 0x00080000,
55262 + GR_PTRACERD = 0x00100000,
55263 + GR_NOPTRACE = 0x00200000,
55264 + GR_SUPPRESS = 0x00400000,
55265 + GR_NOLEARN = 0x00800000,
55266 + GR_INIT_TRANSFER= 0x01000000
55267 +};
55268 +
55269 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
55270 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
55271 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
55272 +
55273 +/* ACL subject-only mode flags */
55274 +enum {
55275 + GR_KILL = 0x00000001,
55276 + GR_VIEW = 0x00000002,
55277 + GR_PROTECTED = 0x00000004,
55278 + GR_LEARN = 0x00000008,
55279 + GR_OVERRIDE = 0x00000010,
55280 + /* just a placeholder, this mode is only used in userspace */
55281 + GR_DUMMY = 0x00000020,
55282 + GR_PROTSHM = 0x00000040,
55283 + GR_KILLPROC = 0x00000080,
55284 + GR_KILLIPPROC = 0x00000100,
55285 + /* just a placeholder, this mode is only used in userspace */
55286 + GR_NOTROJAN = 0x00000200,
55287 + GR_PROTPROCFD = 0x00000400,
55288 + GR_PROCACCT = 0x00000800,
55289 + GR_RELAXPTRACE = 0x00001000,
55290 + GR_NESTED = 0x00002000,
55291 + GR_INHERITLEARN = 0x00004000,
55292 + GR_PROCFIND = 0x00008000,
55293 + GR_POVERRIDE = 0x00010000,
55294 + GR_KERNELAUTH = 0x00020000,
55295 + GR_ATSECURE = 0x00040000,
55296 + GR_SHMEXEC = 0x00080000
55297 +};
55298 +
55299 +enum {
55300 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
55301 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
55302 + GR_PAX_ENABLE_MPROTECT = 0x0004,
55303 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
55304 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
55305 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
55306 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
55307 + GR_PAX_DISABLE_MPROTECT = 0x0400,
55308 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
55309 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
55310 +};
55311 +
55312 +enum {
55313 + GR_ID_USER = 0x01,
55314 + GR_ID_GROUP = 0x02,
55315 +};
55316 +
55317 +enum {
55318 + GR_ID_ALLOW = 0x01,
55319 + GR_ID_DENY = 0x02,
55320 +};
55321 +
55322 +#define GR_CRASH_RES 31
55323 +#define GR_UIDTABLE_MAX 500
55324 +
55325 +/* begin resource learning section */
55326 +enum {
55327 + GR_RLIM_CPU_BUMP = 60,
55328 + GR_RLIM_FSIZE_BUMP = 50000,
55329 + GR_RLIM_DATA_BUMP = 10000,
55330 + GR_RLIM_STACK_BUMP = 1000,
55331 + GR_RLIM_CORE_BUMP = 10000,
55332 + GR_RLIM_RSS_BUMP = 500000,
55333 + GR_RLIM_NPROC_BUMP = 1,
55334 + GR_RLIM_NOFILE_BUMP = 5,
55335 + GR_RLIM_MEMLOCK_BUMP = 50000,
55336 + GR_RLIM_AS_BUMP = 500000,
55337 + GR_RLIM_LOCKS_BUMP = 2,
55338 + GR_RLIM_SIGPENDING_BUMP = 5,
55339 + GR_RLIM_MSGQUEUE_BUMP = 10000,
55340 + GR_RLIM_NICE_BUMP = 1,
55341 + GR_RLIM_RTPRIO_BUMP = 1,
55342 + GR_RLIM_RTTIME_BUMP = 1000000
55343 +};
55344 +
55345 +#endif
55346 diff -urNp linux-2.6.32.42/include/linux/grinternal.h linux-2.6.32.42/include/linux/grinternal.h
55347 --- linux-2.6.32.42/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
55348 +++ linux-2.6.32.42/include/linux/grinternal.h 2011-04-17 15:56:46.000000000 -0400
55349 @@ -0,0 +1,218 @@
55350 +#ifndef __GRINTERNAL_H
55351 +#define __GRINTERNAL_H
55352 +
55353 +#ifdef CONFIG_GRKERNSEC
55354 +
55355 +#include <linux/fs.h>
55356 +#include <linux/mnt_namespace.h>
55357 +#include <linux/nsproxy.h>
55358 +#include <linux/gracl.h>
55359 +#include <linux/grdefs.h>
55360 +#include <linux/grmsg.h>
55361 +
55362 +void gr_add_learn_entry(const char *fmt, ...)
55363 + __attribute__ ((format (printf, 1, 2)));
55364 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
55365 + const struct vfsmount *mnt);
55366 +__u32 gr_check_create(const struct dentry *new_dentry,
55367 + const struct dentry *parent,
55368 + const struct vfsmount *mnt, const __u32 mode);
55369 +int gr_check_protected_task(const struct task_struct *task);
55370 +__u32 to_gr_audit(const __u32 reqmode);
55371 +int gr_set_acls(const int type);
55372 +int gr_apply_subject_to_task(struct task_struct *task);
55373 +int gr_acl_is_enabled(void);
55374 +char gr_roletype_to_char(void);
55375 +
55376 +void gr_handle_alertkill(struct task_struct *task);
55377 +char *gr_to_filename(const struct dentry *dentry,
55378 + const struct vfsmount *mnt);
55379 +char *gr_to_filename1(const struct dentry *dentry,
55380 + const struct vfsmount *mnt);
55381 +char *gr_to_filename2(const struct dentry *dentry,
55382 + const struct vfsmount *mnt);
55383 +char *gr_to_filename3(const struct dentry *dentry,
55384 + const struct vfsmount *mnt);
55385 +
55386 +extern int grsec_enable_harden_ptrace;
55387 +extern int grsec_enable_link;
55388 +extern int grsec_enable_fifo;
55389 +extern int grsec_enable_execve;
55390 +extern int grsec_enable_shm;
55391 +extern int grsec_enable_execlog;
55392 +extern int grsec_enable_signal;
55393 +extern int grsec_enable_audit_ptrace;
55394 +extern int grsec_enable_forkfail;
55395 +extern int grsec_enable_time;
55396 +extern int grsec_enable_rofs;
55397 +extern int grsec_enable_chroot_shmat;
55398 +extern int grsec_enable_chroot_findtask;
55399 +extern int grsec_enable_chroot_mount;
55400 +extern int grsec_enable_chroot_double;
55401 +extern int grsec_enable_chroot_pivot;
55402 +extern int grsec_enable_chroot_chdir;
55403 +extern int grsec_enable_chroot_chmod;
55404 +extern int grsec_enable_chroot_mknod;
55405 +extern int grsec_enable_chroot_fchdir;
55406 +extern int grsec_enable_chroot_nice;
55407 +extern int grsec_enable_chroot_execlog;
55408 +extern int grsec_enable_chroot_caps;
55409 +extern int grsec_enable_chroot_sysctl;
55410 +extern int grsec_enable_chroot_unix;
55411 +extern int grsec_enable_tpe;
55412 +extern int grsec_tpe_gid;
55413 +extern int grsec_enable_tpe_all;
55414 +extern int grsec_enable_tpe_invert;
55415 +extern int grsec_enable_socket_all;
55416 +extern int grsec_socket_all_gid;
55417 +extern int grsec_enable_socket_client;
55418 +extern int grsec_socket_client_gid;
55419 +extern int grsec_enable_socket_server;
55420 +extern int grsec_socket_server_gid;
55421 +extern int grsec_audit_gid;
55422 +extern int grsec_enable_group;
55423 +extern int grsec_enable_audit_textrel;
55424 +extern int grsec_enable_log_rwxmaps;
55425 +extern int grsec_enable_mount;
55426 +extern int grsec_enable_chdir;
55427 +extern int grsec_resource_logging;
55428 +extern int grsec_enable_blackhole;
55429 +extern int grsec_lastack_retries;
55430 +extern int grsec_lock;
55431 +
55432 +extern spinlock_t grsec_alert_lock;
55433 +extern unsigned long grsec_alert_wtime;
55434 +extern unsigned long grsec_alert_fyet;
55435 +
55436 +extern spinlock_t grsec_audit_lock;
55437 +
55438 +extern rwlock_t grsec_exec_file_lock;
55439 +
55440 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
55441 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
55442 + (tsk)->exec_file->f_vfsmnt) : "/")
55443 +
55444 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
55445 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
55446 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55447 +
55448 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
55449 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
55450 + (tsk)->exec_file->f_vfsmnt) : "/")
55451 +
55452 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
55453 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
55454 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55455 +
55456 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
55457 +
55458 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
55459 +
55460 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
55461 + (task)->pid, (cred)->uid, \
55462 + (cred)->euid, (cred)->gid, (cred)->egid, \
55463 + gr_parent_task_fullpath(task), \
55464 + (task)->real_parent->comm, (task)->real_parent->pid, \
55465 + (pcred)->uid, (pcred)->euid, \
55466 + (pcred)->gid, (pcred)->egid
55467 +
55468 +#define GR_CHROOT_CAPS {{ \
55469 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
55470 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
55471 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
55472 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
55473 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
55474 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
55475 +
55476 +#define security_learn(normal_msg,args...) \
55477 +({ \
55478 + read_lock(&grsec_exec_file_lock); \
55479 + gr_add_learn_entry(normal_msg "\n", ## args); \
55480 + read_unlock(&grsec_exec_file_lock); \
55481 +})
55482 +
55483 +enum {
55484 + GR_DO_AUDIT,
55485 + GR_DONT_AUDIT,
55486 + GR_DONT_AUDIT_GOOD
55487 +};
55488 +
55489 +enum {
55490 + GR_TTYSNIFF,
55491 + GR_RBAC,
55492 + GR_RBAC_STR,
55493 + GR_STR_RBAC,
55494 + GR_RBAC_MODE2,
55495 + GR_RBAC_MODE3,
55496 + GR_FILENAME,
55497 + GR_SYSCTL_HIDDEN,
55498 + GR_NOARGS,
55499 + GR_ONE_INT,
55500 + GR_ONE_INT_TWO_STR,
55501 + GR_ONE_STR,
55502 + GR_STR_INT,
55503 + GR_TWO_STR_INT,
55504 + GR_TWO_INT,
55505 + GR_TWO_U64,
55506 + GR_THREE_INT,
55507 + GR_FIVE_INT_TWO_STR,
55508 + GR_TWO_STR,
55509 + GR_THREE_STR,
55510 + GR_FOUR_STR,
55511 + GR_STR_FILENAME,
55512 + GR_FILENAME_STR,
55513 + GR_FILENAME_TWO_INT,
55514 + GR_FILENAME_TWO_INT_STR,
55515 + GR_TEXTREL,
55516 + GR_PTRACE,
55517 + GR_RESOURCE,
55518 + GR_CAP,
55519 + GR_SIG,
55520 + GR_SIG2,
55521 + GR_CRASH1,
55522 + GR_CRASH2,
55523 + GR_PSACCT,
55524 + GR_RWXMAP
55525 +};
55526 +
55527 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
55528 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
55529 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
55530 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
55531 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
55532 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
55533 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
55534 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
55535 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
55536 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
55537 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
55538 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
55539 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
55540 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
55541 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
55542 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
55543 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
55544 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
55545 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
55546 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
55547 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
55548 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
55549 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
55550 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
55551 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
55552 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
55553 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
55554 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
55555 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
55556 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
55557 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
55558 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
55559 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
55560 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
55561 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
55562 +
55563 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
55564 +
55565 +#endif
55566 +
55567 +#endif
55568 diff -urNp linux-2.6.32.42/include/linux/grmsg.h linux-2.6.32.42/include/linux/grmsg.h
55569 --- linux-2.6.32.42/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
55570 +++ linux-2.6.32.42/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
55571 @@ -0,0 +1,108 @@
55572 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
55573 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
55574 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
55575 +#define GR_STOPMOD_MSG "denied modification of module state by "
55576 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
55577 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
55578 +#define GR_IOPERM_MSG "denied use of ioperm() by "
55579 +#define GR_IOPL_MSG "denied use of iopl() by "
55580 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
55581 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
55582 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
55583 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
55584 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
55585 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
55586 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
55587 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
55588 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
55589 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
55590 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
55591 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
55592 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
55593 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
55594 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
55595 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
55596 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
55597 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
55598 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
55599 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
55600 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
55601 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
55602 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
55603 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
55604 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
55605 +#define GR_NPROC_MSG "denied overstep of process limit by "
55606 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
55607 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
55608 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
55609 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
55610 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
55611 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
55612 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
55613 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
55614 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
55615 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
55616 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
55617 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
55618 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
55619 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
55620 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
55621 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
55622 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
55623 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
55624 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
55625 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
55626 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
55627 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
55628 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
55629 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
55630 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
55631 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
55632 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
55633 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
55634 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
55635 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
55636 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
55637 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
55638 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
55639 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
55640 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
55641 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
55642 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
55643 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
55644 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
55645 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
55646 +#define GR_NICE_CHROOT_MSG "denied priority change by "
55647 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
55648 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
55649 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
55650 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
55651 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
55652 +#define GR_TIME_MSG "time set by "
55653 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
55654 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
55655 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
55656 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
55657 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
55658 +#define GR_BIND_MSG "denied bind() by "
55659 +#define GR_CONNECT_MSG "denied connect() by "
55660 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
55661 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
55662 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
55663 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
55664 +#define GR_CAP_ACL_MSG "use of %s denied for "
55665 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
55666 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
55667 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
55668 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
55669 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
55670 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
55671 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
55672 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
55673 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
55674 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
55675 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
55676 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
55677 +#define GR_VM86_MSG "denied use of vm86 by "
55678 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
55679 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
55680 diff -urNp linux-2.6.32.42/include/linux/grsecurity.h linux-2.6.32.42/include/linux/grsecurity.h
55681 --- linux-2.6.32.42/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
55682 +++ linux-2.6.32.42/include/linux/grsecurity.h 2011-04-17 15:56:46.000000000 -0400
55683 @@ -0,0 +1,212 @@
55684 +#ifndef GR_SECURITY_H
55685 +#define GR_SECURITY_H
55686 +#include <linux/fs.h>
55687 +#include <linux/fs_struct.h>
55688 +#include <linux/binfmts.h>
55689 +#include <linux/gracl.h>
55690 +#include <linux/compat.h>
55691 +
55692 +/* notify of brain-dead configs */
55693 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
55694 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
55695 +#endif
55696 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55697 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55698 +#endif
55699 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55700 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55701 +#endif
55702 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
55703 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
55704 +#endif
55705 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
55706 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
55707 +#endif
55708 +
55709 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
55710 +void gr_handle_brute_check(void);
55711 +void gr_handle_kernel_exploit(void);
55712 +int gr_process_user_ban(void);
55713 +
55714 +char gr_roletype_to_char(void);
55715 +
55716 +int gr_acl_enable_at_secure(void);
55717 +
55718 +int gr_check_user_change(int real, int effective, int fs);
55719 +int gr_check_group_change(int real, int effective, int fs);
55720 +
55721 +void gr_del_task_from_ip_table(struct task_struct *p);
55722 +
55723 +int gr_pid_is_chrooted(struct task_struct *p);
55724 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
55725 +int gr_handle_chroot_nice(void);
55726 +int gr_handle_chroot_sysctl(const int op);
55727 +int gr_handle_chroot_setpriority(struct task_struct *p,
55728 + const int niceval);
55729 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
55730 +int gr_handle_chroot_chroot(const struct dentry *dentry,
55731 + const struct vfsmount *mnt);
55732 +int gr_handle_chroot_caps(struct path *path);
55733 +void gr_handle_chroot_chdir(struct path *path);
55734 +int gr_handle_chroot_chmod(const struct dentry *dentry,
55735 + const struct vfsmount *mnt, const int mode);
55736 +int gr_handle_chroot_mknod(const struct dentry *dentry,
55737 + const struct vfsmount *mnt, const int mode);
55738 +int gr_handle_chroot_mount(const struct dentry *dentry,
55739 + const struct vfsmount *mnt,
55740 + const char *dev_name);
55741 +int gr_handle_chroot_pivot(void);
55742 +int gr_handle_chroot_unix(const pid_t pid);
55743 +
55744 +int gr_handle_rawio(const struct inode *inode);
55745 +int gr_handle_nproc(void);
55746 +
55747 +void gr_handle_ioperm(void);
55748 +void gr_handle_iopl(void);
55749 +
55750 +int gr_tpe_allow(const struct file *file);
55751 +
55752 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
55753 +void gr_clear_chroot_entries(struct task_struct *task);
55754 +
55755 +void gr_log_forkfail(const int retval);
55756 +void gr_log_timechange(void);
55757 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
55758 +void gr_log_chdir(const struct dentry *dentry,
55759 + const struct vfsmount *mnt);
55760 +void gr_log_chroot_exec(const struct dentry *dentry,
55761 + const struct vfsmount *mnt);
55762 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
55763 +#ifdef CONFIG_COMPAT
55764 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
55765 +#endif
55766 +void gr_log_remount(const char *devname, const int retval);
55767 +void gr_log_unmount(const char *devname, const int retval);
55768 +void gr_log_mount(const char *from, const char *to, const int retval);
55769 +void gr_log_textrel(struct vm_area_struct *vma);
55770 +void gr_log_rwxmmap(struct file *file);
55771 +void gr_log_rwxmprotect(struct file *file);
55772 +
55773 +int gr_handle_follow_link(const struct inode *parent,
55774 + const struct inode *inode,
55775 + const struct dentry *dentry,
55776 + const struct vfsmount *mnt);
55777 +int gr_handle_fifo(const struct dentry *dentry,
55778 + const struct vfsmount *mnt,
55779 + const struct dentry *dir, const int flag,
55780 + const int acc_mode);
55781 +int gr_handle_hardlink(const struct dentry *dentry,
55782 + const struct vfsmount *mnt,
55783 + struct inode *inode,
55784 + const int mode, const char *to);
55785 +
55786 +int gr_is_capable(const int cap);
55787 +int gr_is_capable_nolog(const int cap);
55788 +void gr_learn_resource(const struct task_struct *task, const int limit,
55789 + const unsigned long wanted, const int gt);
55790 +void gr_copy_label(struct task_struct *tsk);
55791 +void gr_handle_crash(struct task_struct *task, const int sig);
55792 +int gr_handle_signal(const struct task_struct *p, const int sig);
55793 +int gr_check_crash_uid(const uid_t uid);
55794 +int gr_check_protected_task(const struct task_struct *task);
55795 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
55796 +int gr_acl_handle_mmap(const struct file *file,
55797 + const unsigned long prot);
55798 +int gr_acl_handle_mprotect(const struct file *file,
55799 + const unsigned long prot);
55800 +int gr_check_hidden_task(const struct task_struct *tsk);
55801 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
55802 + const struct vfsmount *mnt);
55803 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
55804 + const struct vfsmount *mnt);
55805 +__u32 gr_acl_handle_access(const struct dentry *dentry,
55806 + const struct vfsmount *mnt, const int fmode);
55807 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
55808 + const struct vfsmount *mnt, mode_t mode);
55809 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
55810 + const struct vfsmount *mnt, mode_t mode);
55811 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
55812 + const struct vfsmount *mnt);
55813 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
55814 + const struct vfsmount *mnt);
55815 +int gr_handle_ptrace(struct task_struct *task, const long request);
55816 +int gr_handle_proc_ptrace(struct task_struct *task);
55817 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
55818 + const struct vfsmount *mnt);
55819 +int gr_check_crash_exec(const struct file *filp);
55820 +int gr_acl_is_enabled(void);
55821 +void gr_set_kernel_label(struct task_struct *task);
55822 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
55823 + const gid_t gid);
55824 +int gr_set_proc_label(const struct dentry *dentry,
55825 + const struct vfsmount *mnt,
55826 + const int unsafe_share);
55827 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
55828 + const struct vfsmount *mnt);
55829 +__u32 gr_acl_handle_open(const struct dentry *dentry,
55830 + const struct vfsmount *mnt, const int fmode);
55831 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
55832 + const struct dentry *p_dentry,
55833 + const struct vfsmount *p_mnt, const int fmode,
55834 + const int imode);
55835 +void gr_handle_create(const struct dentry *dentry,
55836 + const struct vfsmount *mnt);
55837 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
55838 + const struct dentry *parent_dentry,
55839 + const struct vfsmount *parent_mnt,
55840 + const int mode);
55841 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
55842 + const struct dentry *parent_dentry,
55843 + const struct vfsmount *parent_mnt);
55844 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
55845 + const struct vfsmount *mnt);
55846 +void gr_handle_delete(const ino_t ino, const dev_t dev);
55847 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
55848 + const struct vfsmount *mnt);
55849 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
55850 + const struct dentry *parent_dentry,
55851 + const struct vfsmount *parent_mnt,
55852 + const char *from);
55853 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
55854 + const struct dentry *parent_dentry,
55855 + const struct vfsmount *parent_mnt,
55856 + const struct dentry *old_dentry,
55857 + const struct vfsmount *old_mnt, const char *to);
55858 +int gr_acl_handle_rename(struct dentry *new_dentry,
55859 + struct dentry *parent_dentry,
55860 + const struct vfsmount *parent_mnt,
55861 + struct dentry *old_dentry,
55862 + struct inode *old_parent_inode,
55863 + struct vfsmount *old_mnt, const char *newname);
55864 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
55865 + struct dentry *old_dentry,
55866 + struct dentry *new_dentry,
55867 + struct vfsmount *mnt, const __u8 replace);
55868 +__u32 gr_check_link(const struct dentry *new_dentry,
55869 + const struct dentry *parent_dentry,
55870 + const struct vfsmount *parent_mnt,
55871 + const struct dentry *old_dentry,
55872 + const struct vfsmount *old_mnt);
55873 +int gr_acl_handle_filldir(const struct file *file, const char *name,
55874 + const unsigned int namelen, const ino_t ino);
55875 +
55876 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
55877 + const struct vfsmount *mnt);
55878 +void gr_acl_handle_exit(void);
55879 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
55880 +int gr_acl_handle_procpidmem(const struct task_struct *task);
55881 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
55882 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
55883 +void gr_audit_ptrace(struct task_struct *task);
55884 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
55885 +
55886 +#ifdef CONFIG_GRKERNSEC
55887 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
55888 +void gr_handle_vm86(void);
55889 +void gr_handle_mem_readwrite(u64 from, u64 to);
55890 +
55891 +extern int grsec_enable_dmesg;
55892 +extern int grsec_disable_privio;
55893 +#endif
55894 +
55895 +#endif
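
[Aside, not part of the patch] The int-returning gr_handle_*() hooks declared above are consulted at the top of syscall and VFS paths. A minimal caller sketch, assuming (this hunk does not show it) that a nonzero return from the hook means the action is denied; example_nice_allowed() is an invented name:

    static int example_nice_allowed(void)
    {
            /* assumed convention: nonzero from the hook means "deny" */
            if (gr_handle_chroot_nice())
                    return -EPERM;
            return 0;
    }
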
55896 diff -urNp linux-2.6.32.42/include/linux/hdpu_features.h linux-2.6.32.42/include/linux/hdpu_features.h
55897 --- linux-2.6.32.42/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
55898 +++ linux-2.6.32.42/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
55899 @@ -3,7 +3,7 @@
55900 struct cpustate_t {
55901 spinlock_t lock;
55902 int excl;
55903 - int open_count;
55904 + atomic_t open_count;
55905 unsigned char cached_val;
55906 int inited;
55907 unsigned long *set_addr;
55908 diff -urNp linux-2.6.32.42/include/linux/highmem.h linux-2.6.32.42/include/linux/highmem.h
55909 --- linux-2.6.32.42/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
55910 +++ linux-2.6.32.42/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
55911 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct
55912 kunmap_atomic(kaddr, KM_USER0);
55913 }
55914
55915 +static inline void sanitize_highpage(struct page *page)
55916 +{
55917 + void *kaddr;
55918 + unsigned long flags;
55919 +
55920 + local_irq_save(flags);
55921 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
55922 + clear_page(kaddr);
55923 + kunmap_atomic(kaddr, KM_CLEARPAGE);
55924 + local_irq_restore(flags);
55925 +}
55926 +
55927 static inline void zero_user_segments(struct page *page,
55928 unsigned start1, unsigned end1,
55929 unsigned start2, unsigned end2)
55930 diff -urNp linux-2.6.32.42/include/linux/i2o.h linux-2.6.32.42/include/linux/i2o.h
55931 --- linux-2.6.32.42/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
55932 +++ linux-2.6.32.42/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
55933 @@ -564,7 +564,7 @@ struct i2o_controller {
55934 struct i2o_device *exec; /* Executive */
55935 #if BITS_PER_LONG == 64
55936 spinlock_t context_list_lock; /* lock for context_list */
55937 - atomic_t context_list_counter; /* needed for unique contexts */
55938 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
55939 struct list_head context_list; /* list of context id's
55940 and pointers */
55941 #endif
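
[Aside, not part of the patch] Several hunks in this section swap atomic_t counters for atomic_unchecked_t (the unique-context generator here, later mce_bad_pages, the perf event counters and zone vm_stat). A minimal userspace sketch of the assumed semantics, where checked_inc() and unchecked_inc() are invented names: under PaX-style overflow checking an ordinary atomic increment is treated as a bug when it would wrap, and the _unchecked variant is the opt-out for counters whose wraparound is expected and harmless.

    #include <stdint.h>
    #include <assert.h>

    /* models an overflow-checked increment: wraparound is treated as a bug */
    static uint32_t checked_inc(uint32_t v)
    {
            assert(v != UINT32_MAX);
            return v + 1;
    }

    /* models an unchecked increment: wraparound is allowed, e.g. for id generators */
    static uint32_t unchecked_inc(uint32_t v)
    {
            return v + 1;
    }
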
55942 diff -urNp linux-2.6.32.42/include/linux/init_task.h linux-2.6.32.42/include/linux/init_task.h
55943 --- linux-2.6.32.42/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
55944 +++ linux-2.6.32.42/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
55945 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
55946 #define INIT_IDS
55947 #endif
55948
55949 +#ifdef CONFIG_X86
55950 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
55951 +#else
55952 +#define INIT_TASK_THREAD_INFO
55953 +#endif
55954 +
55955 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
55956 /*
55957 * Because of the reduced scope of CAP_SETPCAP when filesystem
55958 @@ -156,6 +162,7 @@ extern struct cred init_cred;
55959 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
55960 .comm = "swapper", \
55961 .thread = INIT_THREAD, \
55962 + INIT_TASK_THREAD_INFO \
55963 .fs = &init_fs, \
55964 .files = &init_files, \
55965 .signal = &init_signals, \
55966 diff -urNp linux-2.6.32.42/include/linux/interrupt.h linux-2.6.32.42/include/linux/interrupt.h
55967 --- linux-2.6.32.42/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
55968 +++ linux-2.6.32.42/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
55969 @@ -363,7 +363,7 @@ enum
55970 /* map softirq index to softirq name. update 'softirq_to_name' in
55971 * kernel/softirq.c when adding a new softirq.
55972 */
55973 -extern char *softirq_to_name[NR_SOFTIRQS];
55974 +extern const char * const softirq_to_name[NR_SOFTIRQS];
55975
55976 /* softirq mask and active fields moved to irq_cpustat_t in
55977 * asm/hardirq.h to get better cache usage. KAO
55978 @@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
55979
55980 struct softirq_action
55981 {
55982 - void (*action)(struct softirq_action *);
55983 + void (*action)(void);
55984 };
55985
55986 asmlinkage void do_softirq(void);
55987 asmlinkage void __do_softirq(void);
55988 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
55989 +extern void open_softirq(int nr, void (*action)(void));
55990 extern void softirq_init(void);
55991 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
55992 extern void raise_softirq_irqoff(unsigned int nr);
55993 diff -urNp linux-2.6.32.42/include/linux/irq.h linux-2.6.32.42/include/linux/irq.h
55994 --- linux-2.6.32.42/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
55995 +++ linux-2.6.32.42/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
55996 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
55997 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
55998 bool boot)
55999 {
56000 +#ifdef CONFIG_CPUMASK_OFFSTACK
56001 gfp_t gfp = GFP_ATOMIC;
56002
56003 if (boot)
56004 gfp = GFP_NOWAIT;
56005
56006 -#ifdef CONFIG_CPUMASK_OFFSTACK
56007 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
56008 return false;
56009
56010 diff -urNp linux-2.6.32.42/include/linux/kallsyms.h linux-2.6.32.42/include/linux/kallsyms.h
56011 --- linux-2.6.32.42/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
56012 +++ linux-2.6.32.42/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
56013 @@ -15,7 +15,8 @@
56014
56015 struct module;
56016
56017 -#ifdef CONFIG_KALLSYMS
56018 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
56019 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
56020 /* Lookup the address for a symbol. Returns 0 if not found. */
56021 unsigned long kallsyms_lookup_name(const char *name);
56022
56023 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
56024 /* Stupid that this does nothing, but I didn't create this mess. */
56025 #define __print_symbol(fmt, addr)
56026 #endif /*CONFIG_KALLSYMS*/
56027 +#else /* when included by kallsyms.c, vsnprintf.c, or
56028 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
56029 +extern void __print_symbol(const char *fmt, unsigned long address);
56030 +extern int sprint_symbol(char *buffer, unsigned long address);
56031 +const char *kallsyms_lookup(unsigned long addr,
56032 + unsigned long *symbolsize,
56033 + unsigned long *offset,
56034 + char **modname, char *namebuf);
56035 +#endif
56036
56037 /* This macro allows us to keep printk typechecking */
56038 static void __check_printsym_format(const char *fmt, ...)
56039 diff -urNp linux-2.6.32.42/include/linux/kgdb.h linux-2.6.32.42/include/linux/kgdb.h
56040 --- linux-2.6.32.42/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
56041 +++ linux-2.6.32.42/include/linux/kgdb.h 2011-05-04 17:56:20.000000000 -0400
56042 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
56043
56044 extern int kgdb_connected;
56045
56046 -extern atomic_t kgdb_setting_breakpoint;
56047 -extern atomic_t kgdb_cpu_doing_single_step;
56048 +extern atomic_unchecked_t kgdb_setting_breakpoint;
56049 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
56050
56051 extern struct task_struct *kgdb_usethread;
56052 extern struct task_struct *kgdb_contthread;
56053 @@ -251,20 +251,20 @@ struct kgdb_arch {
56054 */
56055 struct kgdb_io {
56056 const char *name;
56057 - int (*read_char) (void);
56058 - void (*write_char) (u8);
56059 - void (*flush) (void);
56060 - int (*init) (void);
56061 - void (*pre_exception) (void);
56062 - void (*post_exception) (void);
56063 + int (* const read_char) (void);
56064 + void (* const write_char) (u8);
56065 + void (* const flush) (void);
56066 + int (* const init) (void);
56067 + void (* const pre_exception) (void);
56068 + void (* const post_exception) (void);
56069 };
56070
56071 -extern struct kgdb_arch arch_kgdb_ops;
56072 +extern const struct kgdb_arch arch_kgdb_ops;
56073
56074 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
56075
56076 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
56077 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
56078 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
56079 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
56080
56081 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
56082 extern int kgdb_mem2hex(char *mem, char *buf, int count);
56083 diff -urNp linux-2.6.32.42/include/linux/kmod.h linux-2.6.32.42/include/linux/kmod.h
56084 --- linux-2.6.32.42/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
56085 +++ linux-2.6.32.42/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
56086 @@ -31,6 +31,8 @@
56087 * usually useless though. */
56088 extern int __request_module(bool wait, const char *name, ...) \
56089 __attribute__((format(printf, 2, 3)));
56090 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
56091 + __attribute__((format(printf, 3, 4)));
56092 #define request_module(mod...) __request_module(true, mod)
56093 #define request_module_nowait(mod...) __request_module(false, mod)
56094 #define try_then_request_module(x, mod...) \
56095 diff -urNp linux-2.6.32.42/include/linux/kobject.h linux-2.6.32.42/include/linux/kobject.h
56096 --- linux-2.6.32.42/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
56097 +++ linux-2.6.32.42/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
56098 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
56099
56100 struct kobj_type {
56101 void (*release)(struct kobject *kobj);
56102 - struct sysfs_ops *sysfs_ops;
56103 + const struct sysfs_ops *sysfs_ops;
56104 struct attribute **default_attrs;
56105 };
56106
56107 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
56108 };
56109
56110 struct kset_uevent_ops {
56111 - int (*filter)(struct kset *kset, struct kobject *kobj);
56112 - const char *(*name)(struct kset *kset, struct kobject *kobj);
56113 - int (*uevent)(struct kset *kset, struct kobject *kobj,
56114 + int (* const filter)(struct kset *kset, struct kobject *kobj);
56115 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
56116 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
56117 struct kobj_uevent_env *env);
56118 };
56119
56120 @@ -132,7 +132,7 @@ struct kobj_attribute {
56121 const char *buf, size_t count);
56122 };
56123
56124 -extern struct sysfs_ops kobj_sysfs_ops;
56125 +extern const struct sysfs_ops kobj_sysfs_ops;
56126
56127 /**
56128 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
56129 @@ -155,14 +155,14 @@ struct kset {
56130 struct list_head list;
56131 spinlock_t list_lock;
56132 struct kobject kobj;
56133 - struct kset_uevent_ops *uevent_ops;
56134 + const struct kset_uevent_ops *uevent_ops;
56135 };
56136
56137 extern void kset_init(struct kset *kset);
56138 extern int __must_check kset_register(struct kset *kset);
56139 extern void kset_unregister(struct kset *kset);
56140 extern struct kset * __must_check kset_create_and_add(const char *name,
56141 - struct kset_uevent_ops *u,
56142 + const struct kset_uevent_ops *u,
56143 struct kobject *parent_kobj);
56144
56145 static inline struct kset *to_kset(struct kobject *kobj)
56146 diff -urNp linux-2.6.32.42/include/linux/kvm_host.h linux-2.6.32.42/include/linux/kvm_host.h
56147 --- linux-2.6.32.42/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
56148 +++ linux-2.6.32.42/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
56149 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
56150 void vcpu_load(struct kvm_vcpu *vcpu);
56151 void vcpu_put(struct kvm_vcpu *vcpu);
56152
56153 -int kvm_init(void *opaque, unsigned int vcpu_size,
56154 +int kvm_init(const void *opaque, unsigned int vcpu_size,
56155 struct module *module);
56156 void kvm_exit(void);
56157
56158 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
56159 struct kvm_guest_debug *dbg);
56160 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
56161
56162 -int kvm_arch_init(void *opaque);
56163 +int kvm_arch_init(const void *opaque);
56164 void kvm_arch_exit(void);
56165
56166 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
56167 diff -urNp linux-2.6.32.42/include/linux/libata.h linux-2.6.32.42/include/linux/libata.h
56168 --- linux-2.6.32.42/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
56169 +++ linux-2.6.32.42/include/linux/libata.h 2011-04-23 12:56:11.000000000 -0400
56170 @@ -525,11 +525,11 @@ struct ata_ioports {
56171
56172 struct ata_host {
56173 spinlock_t lock;
56174 - struct device *dev;
56175 + struct device *dev;
56176 void __iomem * const *iomap;
56177 unsigned int n_ports;
56178 void *private_data;
56179 - struct ata_port_operations *ops;
56180 + const struct ata_port_operations *ops;
56181 unsigned long flags;
56182 #ifdef CONFIG_ATA_ACPI
56183 acpi_handle acpi_handle;
56184 @@ -710,7 +710,7 @@ struct ata_link {
56185
56186 struct ata_port {
56187 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
56188 - struct ata_port_operations *ops;
56189 + const struct ata_port_operations *ops;
56190 spinlock_t *lock;
56191 /* Flags owned by the EH context. Only EH should touch these once the
56192 port is active */
56193 @@ -892,7 +892,7 @@ struct ata_port_info {
56194 unsigned long pio_mask;
56195 unsigned long mwdma_mask;
56196 unsigned long udma_mask;
56197 - struct ata_port_operations *port_ops;
56198 + const struct ata_port_operations *port_ops;
56199 void *private_data;
56200 };
56201
56202 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
56203 extern const unsigned long sata_deb_timing_hotplug[];
56204 extern const unsigned long sata_deb_timing_long[];
56205
56206 -extern struct ata_port_operations ata_dummy_port_ops;
56207 +extern const struct ata_port_operations ata_dummy_port_ops;
56208 extern const struct ata_port_info ata_dummy_port_info;
56209
56210 static inline const unsigned long *
56211 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
56212 struct scsi_host_template *sht);
56213 extern void ata_host_detach(struct ata_host *host);
56214 extern void ata_host_init(struct ata_host *, struct device *,
56215 - unsigned long, struct ata_port_operations *);
56216 + unsigned long, const struct ata_port_operations *);
56217 extern int ata_scsi_detect(struct scsi_host_template *sht);
56218 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
56219 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
56220 diff -urNp linux-2.6.32.42/include/linux/lockd/bind.h linux-2.6.32.42/include/linux/lockd/bind.h
56221 --- linux-2.6.32.42/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
56222 +++ linux-2.6.32.42/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
56223 @@ -23,13 +23,13 @@ struct svc_rqst;
56224 * This is the set of functions for lockd->nfsd communication
56225 */
56226 struct nlmsvc_binding {
56227 - __be32 (*fopen)(struct svc_rqst *,
56228 + __be32 (* const fopen)(struct svc_rqst *,
56229 struct nfs_fh *,
56230 struct file **);
56231 - void (*fclose)(struct file *);
56232 + void (* const fclose)(struct file *);
56233 };
56234
56235 -extern struct nlmsvc_binding * nlmsvc_ops;
56236 +extern const struct nlmsvc_binding * nlmsvc_ops;
56237
56238 /*
56239 * Similar to nfs_client_initdata, but without the NFS-specific
56240 diff -urNp linux-2.6.32.42/include/linux/mm.h linux-2.6.32.42/include/linux/mm.h
56241 --- linux-2.6.32.42/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
56242 +++ linux-2.6.32.42/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
56243 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
56244
56245 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
56246 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
56247 +
56248 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
56249 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
56250 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
56251 +#else
56252 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
56253 +#endif
56254 +
56255 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
56256 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
56257
56258 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
56259 int set_page_dirty_lock(struct page *page);
56260 int clear_page_dirty_for_io(struct page *page);
56261
56262 -/* Is the vma a continuation of the stack vma above it? */
56263 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
56264 -{
56265 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
56266 -}
56267 -
56268 extern unsigned long move_page_tables(struct vm_area_struct *vma,
56269 unsigned long old_addr, struct vm_area_struct *new_vma,
56270 unsigned long new_addr, unsigned long len);
56271 @@ -890,6 +891,8 @@ struct shrinker {
56272 extern void register_shrinker(struct shrinker *);
56273 extern void unregister_shrinker(struct shrinker *);
56274
56275 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
56276 +
56277 int vma_wants_writenotify(struct vm_area_struct *vma);
56278
56279 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
56280 @@ -1162,6 +1165,7 @@ out:
56281 }
56282
56283 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
56284 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
56285
56286 extern unsigned long do_brk(unsigned long, unsigned long);
56287
56288 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
56289 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
56290 struct vm_area_struct **pprev);
56291
56292 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
56293 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
56294 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
56295 +
56296 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
56297 NULL if none. Assume start_addr < end_addr. */
56298 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
56299 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
56300 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
56301 }
56302
56303 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
56304 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
56305 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
56306 unsigned long pfn, unsigned long size, pgprot_t);
56307 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
56308 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
56309 extern int sysctl_memory_failure_early_kill;
56310 extern int sysctl_memory_failure_recovery;
56311 -extern atomic_long_t mce_bad_pages;
56312 +extern atomic_long_unchecked_t mce_bad_pages;
56313 +
56314 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56315 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
56316 +#else
56317 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
56318 +#endif
56319
56320 #endif /* __KERNEL__ */
56321 #endif /* _LINUX_MM_H */
56322 diff -urNp linux-2.6.32.42/include/linux/mm_types.h linux-2.6.32.42/include/linux/mm_types.h
56323 --- linux-2.6.32.42/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
56324 +++ linux-2.6.32.42/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
56325 @@ -186,6 +186,8 @@ struct vm_area_struct {
56326 #ifdef CONFIG_NUMA
56327 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
56328 #endif
56329 +
56330 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
56331 };
56332
56333 struct core_thread {
56334 @@ -287,6 +289,24 @@ struct mm_struct {
56335 #ifdef CONFIG_MMU_NOTIFIER
56336 struct mmu_notifier_mm *mmu_notifier_mm;
56337 #endif
56338 +
56339 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56340 + unsigned long pax_flags;
56341 +#endif
56342 +
56343 +#ifdef CONFIG_PAX_DLRESOLVE
56344 + unsigned long call_dl_resolve;
56345 +#endif
56346 +
56347 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
56348 + unsigned long call_syscall;
56349 +#endif
56350 +
56351 +#ifdef CONFIG_PAX_ASLR
56352 + unsigned long delta_mmap; /* randomized offset */
56353 + unsigned long delta_stack; /* randomized offset */
56354 +#endif
56355 +
56356 };
56357
56358 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
56359 diff -urNp linux-2.6.32.42/include/linux/mmu_notifier.h linux-2.6.32.42/include/linux/mmu_notifier.h
56360 --- linux-2.6.32.42/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
56361 +++ linux-2.6.32.42/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
56362 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
56363 */
56364 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
56365 ({ \
56366 - pte_t __pte; \
56367 + pte_t ___pte; \
56368 struct vm_area_struct *___vma = __vma; \
56369 unsigned long ___address = __address; \
56370 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
56371 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
56372 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
56373 - __pte; \
56374 + ___pte; \
56375 })
56376
56377 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
56378 diff -urNp linux-2.6.32.42/include/linux/mmzone.h linux-2.6.32.42/include/linux/mmzone.h
56379 --- linux-2.6.32.42/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
56380 +++ linux-2.6.32.42/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
56381 @@ -350,7 +350,7 @@ struct zone {
56382 unsigned long flags; /* zone flags, see below */
56383
56384 /* Zone statistics */
56385 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56386 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56387
56388 /*
56389 * prev_priority holds the scanning priority for this zone. It is
56390 diff -urNp linux-2.6.32.42/include/linux/mod_devicetable.h linux-2.6.32.42/include/linux/mod_devicetable.h
56391 --- linux-2.6.32.42/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
56392 +++ linux-2.6.32.42/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
56393 @@ -12,7 +12,7 @@
56394 typedef unsigned long kernel_ulong_t;
56395 #endif
56396
56397 -#define PCI_ANY_ID (~0)
56398 +#define PCI_ANY_ID ((__u16)~0)
56399
56400 struct pci_device_id {
56401 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
56402 @@ -131,7 +131,7 @@ struct usb_device_id {
56403 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
56404 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
56405
56406 -#define HID_ANY_ID (~0)
56407 +#define HID_ANY_ID (~0U)
56408
56409 struct hid_device_id {
56410 __u16 bus;
56411 diff -urNp linux-2.6.32.42/include/linux/module.h linux-2.6.32.42/include/linux/module.h
56412 --- linux-2.6.32.42/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
56413 +++ linux-2.6.32.42/include/linux/module.h 2011-04-17 15:56:46.000000000 -0400
56414 @@ -287,16 +287,16 @@ struct module
56415 int (*init)(void);
56416
56417 /* If this is non-NULL, vfree after init() returns */
56418 - void *module_init;
56419 + void *module_init_rx, *module_init_rw;
56420
56421 /* Here is the actual code + data, vfree'd on unload. */
56422 - void *module_core;
56423 + void *module_core_rx, *module_core_rw;
56424
56425 /* Here are the sizes of the init and core sections */
56426 - unsigned int init_size, core_size;
56427 + unsigned int init_size_rw, core_size_rw;
56428
56429 /* The size of the executable code in each section. */
56430 - unsigned int init_text_size, core_text_size;
56431 + unsigned int init_size_rx, core_size_rx;
56432
56433 /* Arch-specific module values */
56434 struct mod_arch_specific arch;
56435 @@ -393,16 +393,46 @@ struct module *__module_address(unsigned
56436 bool is_module_address(unsigned long addr);
56437 bool is_module_text_address(unsigned long addr);
56438
56439 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
56440 +{
56441 +
56442 +#ifdef CONFIG_PAX_KERNEXEC
56443 + if (ktla_ktva(addr) >= (unsigned long)start &&
56444 + ktla_ktva(addr) < (unsigned long)start + size)
56445 + return 1;
56446 +#endif
56447 +
56448 + return ((void *)addr >= start && (void *)addr < start + size);
56449 +}
56450 +
56451 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
56452 +{
56453 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
56454 +}
56455 +
56456 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
56457 +{
56458 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
56459 +}
56460 +
56461 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
56462 +{
56463 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
56464 +}
56465 +
56466 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
56467 +{
56468 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
56469 +}
56470 +
56471 static inline int within_module_core(unsigned long addr, struct module *mod)
56472 {
56473 - return (unsigned long)mod->module_core <= addr &&
56474 - addr < (unsigned long)mod->module_core + mod->core_size;
56475 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
56476 }
56477
56478 static inline int within_module_init(unsigned long addr, struct module *mod)
56479 {
56480 - return (unsigned long)mod->module_init <= addr &&
56481 - addr < (unsigned long)mod->module_init + mod->init_size;
56482 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
56483 }
56484
56485 /* Search for module by name: must hold module_mutex. */
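
[Aside, not part of the patch] With the core image split into RX and RW halves above, within_module_core() becomes the union of two range checks. A standalone sketch of that check, assuming flat userspace addresses and ignoring the KERNEXEC ktla_ktva() translation; the struct and function names are invented:

    #include <stdbool.h>

    struct region { unsigned long base, size; };

    static bool in_region(unsigned long addr, struct region r)
    {
            return addr >= r.base && addr < r.base + r.size;
    }

    /* an address belongs to the module core if it falls in either half */
    static bool in_split_core(unsigned long addr, struct region rx, struct region rw)
    {
            return in_region(addr, rx) || in_region(addr, rw);
    }
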
56486 diff -urNp linux-2.6.32.42/include/linux/moduleloader.h linux-2.6.32.42/include/linux/moduleloader.h
56487 --- linux-2.6.32.42/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
56488 +++ linux-2.6.32.42/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
56489 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
56490 sections. Returns NULL on failure. */
56491 void *module_alloc(unsigned long size);
56492
56493 +#ifdef CONFIG_PAX_KERNEXEC
56494 +void *module_alloc_exec(unsigned long size);
56495 +#else
56496 +#define module_alloc_exec(x) module_alloc(x)
56497 +#endif
56498 +
56499 /* Free memory returned from module_alloc. */
56500 void module_free(struct module *mod, void *module_region);
56501
56502 +#ifdef CONFIG_PAX_KERNEXEC
56503 +void module_free_exec(struct module *mod, void *module_region);
56504 +#else
56505 +#define module_free_exec(x, y) module_free((x), (y))
56506 +#endif
56507 +
56508 /* Apply the given relocation to the (simplified) ELF. Return -error
56509 or 0. */
56510 int apply_relocate(Elf_Shdr *sechdrs,
56511 diff -urNp linux-2.6.32.42/include/linux/moduleparam.h linux-2.6.32.42/include/linux/moduleparam.h
56512 --- linux-2.6.32.42/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
56513 +++ linux-2.6.32.42/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
56514 @@ -132,7 +132,7 @@ struct kparam_array
56515
56516 /* Actually copy string: maxlen param is usually sizeof(string). */
56517 #define module_param_string(name, string, len, perm) \
56518 - static const struct kparam_string __param_string_##name \
56519 + static const struct kparam_string __param_string_##name __used \
56520 = { len, string }; \
56521 __module_param_call(MODULE_PARAM_PREFIX, name, \
56522 param_set_copystring, param_get_string, \
56523 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
56524
56525 /* Comma-separated array: *nump is set to number they actually specified. */
56526 #define module_param_array_named(name, array, type, nump, perm) \
56527 - static const struct kparam_array __param_arr_##name \
56528 + static const struct kparam_array __param_arr_##name __used \
56529 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
56530 sizeof(array[0]), array }; \
56531 __module_param_call(MODULE_PARAM_PREFIX, name, \
56532 diff -urNp linux-2.6.32.42/include/linux/mutex.h linux-2.6.32.42/include/linux/mutex.h
56533 --- linux-2.6.32.42/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
56534 +++ linux-2.6.32.42/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
56535 @@ -51,7 +51,7 @@ struct mutex {
56536 spinlock_t wait_lock;
56537 struct list_head wait_list;
56538 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
56539 - struct thread_info *owner;
56540 + struct task_struct *owner;
56541 #endif
56542 #ifdef CONFIG_DEBUG_MUTEXES
56543 const char *name;
56544 diff -urNp linux-2.6.32.42/include/linux/namei.h linux-2.6.32.42/include/linux/namei.h
56545 --- linux-2.6.32.42/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
56546 +++ linux-2.6.32.42/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
56547 @@ -22,7 +22,7 @@ struct nameidata {
56548 unsigned int flags;
56549 int last_type;
56550 unsigned depth;
56551 - char *saved_names[MAX_NESTED_LINKS + 1];
56552 + const char *saved_names[MAX_NESTED_LINKS + 1];
56553
56554 /* Intent data */
56555 union {
56556 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
56557 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
56558 extern void unlock_rename(struct dentry *, struct dentry *);
56559
56560 -static inline void nd_set_link(struct nameidata *nd, char *path)
56561 +static inline void nd_set_link(struct nameidata *nd, const char *path)
56562 {
56563 nd->saved_names[nd->depth] = path;
56564 }
56565
56566 -static inline char *nd_get_link(struct nameidata *nd)
56567 +static inline const char *nd_get_link(const struct nameidata *nd)
56568 {
56569 return nd->saved_names[nd->depth];
56570 }
56571 diff -urNp linux-2.6.32.42/include/linux/netfilter/xt_gradm.h linux-2.6.32.42/include/linux/netfilter/xt_gradm.h
56572 --- linux-2.6.32.42/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
56573 +++ linux-2.6.32.42/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
56574 @@ -0,0 +1,9 @@
56575 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
56576 +#define _LINUX_NETFILTER_XT_GRADM_H 1
56577 +
56578 +struct xt_gradm_mtinfo {
56579 + __u16 flags;
56580 + __u16 invflags;
56581 +};
56582 +
56583 +#endif
56584 diff -urNp linux-2.6.32.42/include/linux/nodemask.h linux-2.6.32.42/include/linux/nodemask.h
56585 --- linux-2.6.32.42/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
56586 +++ linux-2.6.32.42/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
56587 @@ -464,11 +464,11 @@ static inline int num_node_state(enum no
56588
56589 #define any_online_node(mask) \
56590 ({ \
56591 - int node; \
56592 - for_each_node_mask(node, (mask)) \
56593 - if (node_online(node)) \
56594 + int __node; \
56595 + for_each_node_mask(__node, (mask)) \
56596 + if (node_online(__node)) \
56597 break; \
56598 - node; \
56599 + __node; \
56600 })
56601
56602 #define num_online_nodes() num_node_state(N_ONLINE)
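
[Aside, not part of the patch] The renames in the two hunks above (__pte to ___pte in ptep_clear_flush_notify(), node to __node in any_online_node()) avoid name capture inside GCC statement-expression macros. A small userspace demo of the bug being avoided; PICK_MAX and the variable names are invented for illustration:

    #include <stdio.h>

    /* the macro's local __x shadows any __x the caller passes in as b */
    #define PICK_MAX(a, b) ({ int __x = (a); __x > (b) ? __x : (b); })

    int main(void)
    {
            int __x = 5;
            /* b expands to the macro's own __x (already 3), so this prints 3, not 5 */
            printf("%d\n", PICK_MAX(3, __x));
            return 0;
    }
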
56603 diff -urNp linux-2.6.32.42/include/linux/oprofile.h linux-2.6.32.42/include/linux/oprofile.h
56604 --- linux-2.6.32.42/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
56605 +++ linux-2.6.32.42/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
56606 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
56607 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
56608 char const * name, ulong * val);
56609
56610 -/** Create a file for read-only access to an atomic_t. */
56611 +/** Create a file for read-only access to an atomic_unchecked_t. */
56612 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
56613 - char const * name, atomic_t * val);
56614 + char const * name, atomic_unchecked_t * val);
56615
56616 /** create a directory */
56617 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
56618 diff -urNp linux-2.6.32.42/include/linux/perf_event.h linux-2.6.32.42/include/linux/perf_event.h
56619 --- linux-2.6.32.42/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
56620 +++ linux-2.6.32.42/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
56621 @@ -476,7 +476,7 @@ struct hw_perf_event {
56622 struct hrtimer hrtimer;
56623 };
56624 };
56625 - atomic64_t prev_count;
56626 + atomic64_unchecked_t prev_count;
56627 u64 sample_period;
56628 u64 last_period;
56629 atomic64_t period_left;
56630 @@ -557,7 +557,7 @@ struct perf_event {
56631 const struct pmu *pmu;
56632
56633 enum perf_event_active_state state;
56634 - atomic64_t count;
56635 + atomic64_unchecked_t count;
56636
56637 /*
56638 * These are the total time in nanoseconds that the event
56639 @@ -595,8 +595,8 @@ struct perf_event {
56640 * These accumulate total time (in nanoseconds) that children
56641 * events have been enabled and running, respectively.
56642 */
56643 - atomic64_t child_total_time_enabled;
56644 - atomic64_t child_total_time_running;
56645 + atomic64_unchecked_t child_total_time_enabled;
56646 + atomic64_unchecked_t child_total_time_running;
56647
56648 /*
56649 * Protect attach/detach and child_list:
56650 diff -urNp linux-2.6.32.42/include/linux/pipe_fs_i.h linux-2.6.32.42/include/linux/pipe_fs_i.h
56651 --- linux-2.6.32.42/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
56652 +++ linux-2.6.32.42/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
56653 @@ -46,9 +46,9 @@ struct pipe_inode_info {
56654 wait_queue_head_t wait;
56655 unsigned int nrbufs, curbuf;
56656 struct page *tmp_page;
56657 - unsigned int readers;
56658 - unsigned int writers;
56659 - unsigned int waiting_writers;
56660 + atomic_t readers;
56661 + atomic_t writers;
56662 + atomic_t waiting_writers;
56663 unsigned int r_counter;
56664 unsigned int w_counter;
56665 struct fasync_struct *fasync_readers;
56666 diff -urNp linux-2.6.32.42/include/linux/poison.h linux-2.6.32.42/include/linux/poison.h
56667 --- linux-2.6.32.42/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
56668 +++ linux-2.6.32.42/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
56669 @@ -19,8 +19,8 @@
56670 * under normal circumstances, used to verify that nobody uses
56671 * non-initialized list entries.
56672 */
56673 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
56674 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
56675 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
56676 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
56677
56678 /********** include/linux/timer.h **********/
56679 /*
56680 diff -urNp linux-2.6.32.42/include/linux/proc_fs.h linux-2.6.32.42/include/linux/proc_fs.h
56681 --- linux-2.6.32.42/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
56682 +++ linux-2.6.32.42/include/linux/proc_fs.h 2011-04-17 15:56:46.000000000 -0400
56683 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
56684 return proc_create_data(name, mode, parent, proc_fops, NULL);
56685 }
56686
56687 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
56688 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
56689 +{
56690 +#ifdef CONFIG_GRKERNSEC_PROC_USER
56691 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
56692 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56693 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
56694 +#else
56695 + return proc_create_data(name, mode, parent, proc_fops, NULL);
56696 +#endif
56697 +}
56698 +
56699 +
56700 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
56701 mode_t mode, struct proc_dir_entry *base,
56702 read_proc_t *read_proc, void * data)
56703 diff -urNp linux-2.6.32.42/include/linux/ptrace.h linux-2.6.32.42/include/linux/ptrace.h
56704 --- linux-2.6.32.42/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
56705 +++ linux-2.6.32.42/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
56706 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
56707 extern void exit_ptrace(struct task_struct *tracer);
56708 #define PTRACE_MODE_READ 1
56709 #define PTRACE_MODE_ATTACH 2
56710 -/* Returns 0 on success, -errno on denial. */
56711 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
56712 /* Returns true on success, false on denial. */
56713 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
56714 +/* Returns true on success, false on denial. */
56715 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
56716
56717 static inline int ptrace_reparented(struct task_struct *child)
56718 {
56719 diff -urNp linux-2.6.32.42/include/linux/random.h linux-2.6.32.42/include/linux/random.h
56720 --- linux-2.6.32.42/include/linux/random.h 2011-03-27 14:31:47.000000000 -0400
56721 +++ linux-2.6.32.42/include/linux/random.h 2011-04-17 15:56:46.000000000 -0400
56722 @@ -74,6 +74,11 @@ unsigned long randomize_range(unsigned l
56723 u32 random32(void);
56724 void srandom32(u32 seed);
56725
56726 +static inline unsigned long pax_get_random_long(void)
56727 +{
56728 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
56729 +}
56730 +
56731 #endif /* __KERNEL___ */
56732
56733 #endif /* _LINUX_RANDOM_H */
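
[Aside, not part of the patch] pax_get_random_long() above builds a full-width random value from random32(): on 64-bit the second draw fills the high half, on 32-bit the constant sizeof(long) > 4 test compiles the extra work away. A minimal sketch of the same composition with explicit 64-bit types; example_random64() is an invented name and lo/hi stand in for two independent 32-bit draws:

    #include <stdint.h>

    static uint64_t example_random64(uint32_t lo, uint32_t hi)
    {
            /* low 32 bits from the first draw, high 32 bits from the second */
            return (uint64_t)lo | ((uint64_t)hi << 32);
    }
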
56734 diff -urNp linux-2.6.32.42/include/linux/reboot.h linux-2.6.32.42/include/linux/reboot.h
56735 --- linux-2.6.32.42/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
56736 +++ linux-2.6.32.42/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
56737 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
56738 * Architecture-specific implementations of sys_reboot commands.
56739 */
56740
56741 -extern void machine_restart(char *cmd);
56742 -extern void machine_halt(void);
56743 -extern void machine_power_off(void);
56744 +extern void machine_restart(char *cmd) __noreturn;
56745 +extern void machine_halt(void) __noreturn;
56746 +extern void machine_power_off(void) __noreturn;
56747
56748 extern void machine_shutdown(void);
56749 struct pt_regs;
56750 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
56751 */
56752
56753 extern void kernel_restart_prepare(char *cmd);
56754 -extern void kernel_restart(char *cmd);
56755 -extern void kernel_halt(void);
56756 -extern void kernel_power_off(void);
56757 +extern void kernel_restart(char *cmd) __noreturn;
56758 +extern void kernel_halt(void) __noreturn;
56759 +extern void kernel_power_off(void) __noreturn;
56760
56761 void ctrl_alt_del(void);
56762
56763 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
56764 * Emergency restart, callable from an interrupt handler.
56765 */
56766
56767 -extern void emergency_restart(void);
56768 +extern void emergency_restart(void) __noreturn;
56769 #include <asm/emergency-restart.h>
56770
56771 #endif
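
[Aside, not part of the patch] The reboot.h hunks only add __noreturn annotations; the gain is diagnostics and codegen, since the compiler may treat everything after a call to machine_halt() or kernel_power_off() as unreachable. A userspace analog using the underlying GCC attribute; fatal() is an invented name:

    #include <stdlib.h>

    static void fatal(void) __attribute__((noreturn));

    static void fatal(void)
    {
            exit(1); /* never returns, so the attribute is honest */
    }
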
56772 diff -urNp linux-2.6.32.42/include/linux/reiserfs_fs.h linux-2.6.32.42/include/linux/reiserfs_fs.h
56773 --- linux-2.6.32.42/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
56774 +++ linux-2.6.32.42/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
56775 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
56776 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
56777
56778 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
56779 -#define get_generation(s) atomic_read (&fs_generation(s))
56780 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
56781 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
56782 #define __fs_changed(gen,s) (gen != get_generation (s))
56783 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
56784 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
56785 */
56786
56787 struct item_operations {
56788 - int (*bytes_number) (struct item_head * ih, int block_size);
56789 - void (*decrement_key) (struct cpu_key *);
56790 - int (*is_left_mergeable) (struct reiserfs_key * ih,
56791 + int (* const bytes_number) (struct item_head * ih, int block_size);
56792 + void (* const decrement_key) (struct cpu_key *);
56793 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
56794 unsigned long bsize);
56795 - void (*print_item) (struct item_head *, char *item);
56796 - void (*check_item) (struct item_head *, char *item);
56797 + void (* const print_item) (struct item_head *, char *item);
56798 + void (* const check_item) (struct item_head *, char *item);
56799
56800 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
56801 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
56802 int is_affected, int insert_size);
56803 - int (*check_left) (struct virtual_item * vi, int free,
56804 + int (* const check_left) (struct virtual_item * vi, int free,
56805 int start_skip, int end_skip);
56806 - int (*check_right) (struct virtual_item * vi, int free);
56807 - int (*part_size) (struct virtual_item * vi, int from, int to);
56808 - int (*unit_num) (struct virtual_item * vi);
56809 - void (*print_vi) (struct virtual_item * vi);
56810 + int (* const check_right) (struct virtual_item * vi, int free);
56811 + int (* const part_size) (struct virtual_item * vi, int from, int to);
56812 + int (* const unit_num) (struct virtual_item * vi);
56813 + void (* const print_vi) (struct virtual_item * vi);
56814 };
56815
56816 -extern struct item_operations *item_ops[TYPE_ANY + 1];
56817 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
56818
56819 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
56820 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
56821 diff -urNp linux-2.6.32.42/include/linux/reiserfs_fs_sb.h linux-2.6.32.42/include/linux/reiserfs_fs_sb.h
56822 --- linux-2.6.32.42/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
56823 +++ linux-2.6.32.42/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
56824 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
56825 /* Comment? -Hans */
56826 wait_queue_head_t s_wait;
56827 /* To be obsoleted soon by per buffer seals.. -Hans */
56828 - atomic_t s_generation_counter; // increased by one every time the
56829 + atomic_unchecked_t s_generation_counter; // increased by one every time the
56830 // tree gets re-balanced
56831 unsigned long s_properties; /* File system properties. Currently holds
56832 on-disk FS format */
56833 diff -urNp linux-2.6.32.42/include/linux/sched.h linux-2.6.32.42/include/linux/sched.h
56834 --- linux-2.6.32.42/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
56835 +++ linux-2.6.32.42/include/linux/sched.h 2011-06-04 20:42:54.000000000 -0400
56836 @@ -101,6 +101,7 @@ struct bio;
56837 struct fs_struct;
56838 struct bts_context;
56839 struct perf_event_context;
56840 +struct linux_binprm;
56841
56842 /*
56843 * List of flags we want to share for kernel threads,
56844 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
56845 extern signed long schedule_timeout_uninterruptible(signed long timeout);
56846 asmlinkage void __schedule(void);
56847 asmlinkage void schedule(void);
56848 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
56849 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
56850
56851 struct nsproxy;
56852 struct user_namespace;
56853 @@ -371,9 +372,12 @@ struct user_namespace;
56854 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
56855
56856 extern int sysctl_max_map_count;
56857 +extern unsigned long sysctl_heap_stack_gap;
56858
56859 #include <linux/aio.h>
56860
56861 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
56862 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
56863 extern unsigned long
56864 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
56865 unsigned long, unsigned long);
56866 @@ -666,6 +670,16 @@ struct signal_struct {
56867 struct tty_audit_buf *tty_audit_buf;
56868 #endif
56869
56870 +#ifdef CONFIG_GRKERNSEC
56871 + u32 curr_ip;
56872 + u32 saved_ip;
56873 + u32 gr_saddr;
56874 + u32 gr_daddr;
56875 + u16 gr_sport;
56876 + u16 gr_dport;
56877 + u8 used_accept:1;
56878 +#endif
56879 +
56880 int oom_adj; /* OOM kill score adjustment (bit shift) */
56881 };
56882
56883 @@ -723,6 +737,11 @@ struct user_struct {
56884 struct key *session_keyring; /* UID's default session keyring */
56885 #endif
56886
56887 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56888 + unsigned int banned;
56889 + unsigned long ban_expires;
56890 +#endif
56891 +
56892 /* Hash table maintenance information */
56893 struct hlist_node uidhash_node;
56894 uid_t uid;
56895 @@ -1328,8 +1347,8 @@ struct task_struct {
56896 struct list_head thread_group;
56897
56898 struct completion *vfork_done; /* for vfork() */
56899 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
56900 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56901 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
56902 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56903
56904 cputime_t utime, stime, utimescaled, stimescaled;
56905 cputime_t gtime;
56906 @@ -1343,16 +1362,6 @@ struct task_struct {
56907 struct task_cputime cputime_expires;
56908 struct list_head cpu_timers[3];
56909
56910 -/* process credentials */
56911 - const struct cred *real_cred; /* objective and real subjective task
56912 - * credentials (COW) */
56913 - const struct cred *cred; /* effective (overridable) subjective task
56914 - * credentials (COW) */
56915 - struct mutex cred_guard_mutex; /* guard against foreign influences on
56916 - * credential calculations
56917 - * (notably. ptrace) */
56918 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56919 -
56920 char comm[TASK_COMM_LEN]; /* executable name excluding path
56921 - access with [gs]et_task_comm (which lock
56922 it with task_lock())
56923 @@ -1369,6 +1378,10 @@ struct task_struct {
56924 #endif
56925 /* CPU-specific state of this task */
56926 struct thread_struct thread;
56927 +/* thread_info moved to task_struct */
56928 +#ifdef CONFIG_X86
56929 + struct thread_info tinfo;
56930 +#endif
56931 /* filesystem information */
56932 struct fs_struct *fs;
56933 /* open file information */
56934 @@ -1436,6 +1449,15 @@ struct task_struct {
56935 int hardirq_context;
56936 int softirq_context;
56937 #endif
56938 +
56939 +/* process credentials */
56940 + const struct cred *real_cred; /* objective and real subjective task
56941 + * credentials (COW) */
56942 + struct mutex cred_guard_mutex; /* guard against foreign influences on
56943 + * credential calculations
56944 + * (notably. ptrace) */
56945 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56946 +
56947 #ifdef CONFIG_LOCKDEP
56948 # define MAX_LOCK_DEPTH 48UL
56949 u64 curr_chain_key;
56950 @@ -1456,6 +1478,9 @@ struct task_struct {
56951
56952 struct backing_dev_info *backing_dev_info;
56953
56954 + const struct cred *cred; /* effective (overridable) subjective task
56955 + * credentials (COW) */
56956 +
56957 struct io_context *io_context;
56958
56959 unsigned long ptrace_message;
56960 @@ -1519,6 +1544,21 @@ struct task_struct {
56961 unsigned long default_timer_slack_ns;
56962
56963 struct list_head *scm_work_list;
56964 +
56965 +#ifdef CONFIG_GRKERNSEC
56966 + /* grsecurity */
56967 + struct dentry *gr_chroot_dentry;
56968 + struct acl_subject_label *acl;
56969 + struct acl_role_label *role;
56970 + struct file *exec_file;
56971 + u16 acl_role_id;
56972 + /* is this the task that authenticated to the special role */
56973 + u8 acl_sp_role;
56974 + u8 is_writable;
56975 + u8 brute;
56976 + u8 gr_is_chrooted;
56977 +#endif
56978 +
56979 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
56980 /* Index of current stored adress in ret_stack */
56981 int curr_ret_stack;
56982 @@ -1542,6 +1582,57 @@ struct task_struct {
56983 #endif /* CONFIG_TRACING */
56984 };
56985
56986 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
56987 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
56988 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
56989 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
56990 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
56991 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
56992 +
56993 +#ifdef CONFIG_PAX_SOFTMODE
56994 +extern unsigned int pax_softmode;
56995 +#endif
56996 +
56997 +extern int pax_check_flags(unsigned long *);
56998 +
56999 +/* if tsk != current then task_lock must be held on it */
57000 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57001 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
57002 +{
57003 + if (likely(tsk->mm))
57004 + return tsk->mm->pax_flags;
57005 + else
57006 + return 0UL;
57007 +}
57008 +
57009 +/* if tsk != current then task_lock must be held on it */
57010 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
57011 +{
57012 + if (likely(tsk->mm)) {
57013 + tsk->mm->pax_flags = flags;
57014 + return 0;
57015 + }
57016 + return -EINVAL;
57017 +}
57018 +#endif
57019 +
57020 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57021 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
57022 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
57023 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
57024 +#endif
57025 +
57026 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
57027 +void pax_report_insns(void *pc, void *sp);
57028 +void pax_report_refcount_overflow(struct pt_regs *regs);
57029 +void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
57030 +
57031 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
57032 +extern void pax_track_stack(void);
57033 +#else
57034 +static inline void pax_track_stack(void) {}
57035 +#endif
57036 +
57037 /* Future-safe accessor for struct task_struct's cpus_allowed. */
57038 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
57039
57040 @@ -1978,7 +2069,9 @@ void yield(void);
57041 extern struct exec_domain default_exec_domain;
57042
57043 union thread_union {
57044 +#ifndef CONFIG_X86
57045 struct thread_info thread_info;
57046 +#endif
57047 unsigned long stack[THREAD_SIZE/sizeof(long)];
57048 };
57049
57050 @@ -2155,7 +2248,7 @@ extern void __cleanup_sighand(struct sig
57051 extern void exit_itimers(struct signal_struct *);
57052 extern void flush_itimer_signals(void);
57053
57054 -extern NORET_TYPE void do_group_exit(int);
57055 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
57056
57057 extern void daemonize(const char *, ...);
57058 extern int allow_signal(int);
57059 @@ -2284,13 +2377,17 @@ static inline unsigned long *end_of_stac
57060
57061 #endif
57062
57063 -static inline int object_is_on_stack(void *obj)
57064 +static inline int object_starts_on_stack(void *obj)
57065 {
57066 - void *stack = task_stack_page(current);
57067 + const void *stack = task_stack_page(current);
57068
57069 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
57070 }
57071
57072 +#ifdef CONFIG_PAX_USERCOPY
57073 +extern int object_is_on_stack(const void *obj, unsigned long len);
57074 +#endif
57075 +
57076 extern void thread_info_cache_init(void);
57077
57078 #ifdef CONFIG_DEBUG_STACK_USAGE
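The sched.h hunks above add per-mm PaX flag bits and the pax_get_flags()/pax_set_flags() accessors. A minimal usage sketch, assuming CONFIG_PAX_NOEXEC or CONFIG_PAX_ASLR is enabled so the accessors exist; the helper name below is illustrative and not part of the patch:

	#include <linux/sched.h>

	/* Illustrative helper: report whether a task runs with mprotect()
	 * restrictions.  Per the comment in the hunk, task_lock(tsk) must be
	 * held when tsk != current; kernel threads (tsk->mm == NULL) simply
	 * report no flags. */
	static int task_has_pax_mprotect(struct task_struct *tsk)
	{
		unsigned long flags = pax_get_flags(tsk);

		return (flags & MF_PAX_MPROTECT) != 0;
	}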
57079 diff -urNp linux-2.6.32.42/include/linux/screen_info.h linux-2.6.32.42/include/linux/screen_info.h
57080 --- linux-2.6.32.42/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
57081 +++ linux-2.6.32.42/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
57082 @@ -42,7 +42,8 @@ struct screen_info {
57083 __u16 pages; /* 0x32 */
57084 __u16 vesa_attributes; /* 0x34 */
57085 __u32 capabilities; /* 0x36 */
57086 - __u8 _reserved[6]; /* 0x3a */
57087 + __u16 vesapm_size; /* 0x3a */
57088 + __u8 _reserved[4]; /* 0x3c */
57089 } __attribute__((packed));
57090
57091 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
57092 diff -urNp linux-2.6.32.42/include/linux/security.h linux-2.6.32.42/include/linux/security.h
57093 --- linux-2.6.32.42/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
57094 +++ linux-2.6.32.42/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
57095 @@ -34,6 +34,7 @@
57096 #include <linux/key.h>
57097 #include <linux/xfrm.h>
57098 #include <linux/gfp.h>
57099 +#include <linux/grsecurity.h>
57100 #include <net/flow.h>
57101
57102 /* Maximum number of letters for an LSM name string */
57103 diff -urNp linux-2.6.32.42/include/linux/shm.h linux-2.6.32.42/include/linux/shm.h
57104 --- linux-2.6.32.42/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
57105 +++ linux-2.6.32.42/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
57106 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
57107 pid_t shm_cprid;
57108 pid_t shm_lprid;
57109 struct user_struct *mlock_user;
57110 +#ifdef CONFIG_GRKERNSEC
57111 + time_t shm_createtime;
57112 + pid_t shm_lapid;
57113 +#endif
57114 };
57115
57116 /* shm_mode upper byte flags */
57117 diff -urNp linux-2.6.32.42/include/linux/skbuff.h linux-2.6.32.42/include/linux/skbuff.h
57118 --- linux-2.6.32.42/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
57119 +++ linux-2.6.32.42/include/linux/skbuff.h 2011-05-04 17:56:20.000000000 -0400
57120 @@ -544,7 +544,7 @@ static inline union skb_shared_tx *skb_t
57121 */
57122 static inline int skb_queue_empty(const struct sk_buff_head *list)
57123 {
57124 - return list->next == (struct sk_buff *)list;
57125 + return list->next == (const struct sk_buff *)list;
57126 }
57127
57128 /**
57129 @@ -557,7 +557,7 @@ static inline int skb_queue_empty(const
57130 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
57131 const struct sk_buff *skb)
57132 {
57133 - return (skb->next == (struct sk_buff *) list);
57134 + return (skb->next == (const struct sk_buff *) list);
57135 }
57136
57137 /**
57138 @@ -570,7 +570,7 @@ static inline bool skb_queue_is_last(con
57139 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
57140 const struct sk_buff *skb)
57141 {
57142 - return (skb->prev == (struct sk_buff *) list);
57143 + return (skb->prev == (const struct sk_buff *) list);
57144 }
57145
57146 /**
57147 @@ -1367,7 +1367,7 @@ static inline int skb_network_offset(con
57148 * headroom, you should not reduce this.
57149 */
57150 #ifndef NET_SKB_PAD
57151 -#define NET_SKB_PAD 32
57152 +#define NET_SKB_PAD (_AC(32,U))
57153 #endif
57154
57155 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
57156 diff -urNp linux-2.6.32.42/include/linux/slab_def.h linux-2.6.32.42/include/linux/slab_def.h
57157 --- linux-2.6.32.42/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
57158 +++ linux-2.6.32.42/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
57159 @@ -69,10 +69,10 @@ struct kmem_cache {
57160 unsigned long node_allocs;
57161 unsigned long node_frees;
57162 unsigned long node_overflow;
57163 - atomic_t allochit;
57164 - atomic_t allocmiss;
57165 - atomic_t freehit;
57166 - atomic_t freemiss;
57167 + atomic_unchecked_t allochit;
57168 + atomic_unchecked_t allocmiss;
57169 + atomic_unchecked_t freehit;
57170 + atomic_unchecked_t freemiss;
57171
57172 /*
57173 * If debugging is enabled, then the allocator can add additional
57174 diff -urNp linux-2.6.32.42/include/linux/slab.h linux-2.6.32.42/include/linux/slab.h
57175 --- linux-2.6.32.42/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
57176 +++ linux-2.6.32.42/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
57177 @@ -11,12 +11,20 @@
57178
57179 #include <linux/gfp.h>
57180 #include <linux/types.h>
57181 +#include <linux/err.h>
57182
57183 /*
57184 * Flags to pass to kmem_cache_create().
57185 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
57186 */
57187 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
57188 +
57189 +#ifdef CONFIG_PAX_USERCOPY
57190 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
57191 +#else
57192 +#define SLAB_USERCOPY 0x00000000UL
57193 +#endif
57194 +
57195 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
57196 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
57197 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
57198 @@ -82,10 +90,13 @@
57199 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
57200 * Both make kfree a no-op.
57201 */
57202 -#define ZERO_SIZE_PTR ((void *)16)
57203 +#define ZERO_SIZE_PTR \
57204 +({ \
57205 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
57206 + (void *)(-MAX_ERRNO-1L); \
57207 +})
57208
57209 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
57210 - (unsigned long)ZERO_SIZE_PTR)
57211 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
57212
57213 /*
57214 * struct kmem_cache related prototypes
57215 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void
57216 void kfree(const void *);
57217 void kzfree(const void *);
57218 size_t ksize(const void *);
57219 +void check_object_size(const void *ptr, unsigned long n, bool to);
57220
57221 /*
57222 * Allocator specific definitions. These are mainly used to establish optimized
57223 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
57224
57225 void __init kmem_cache_init_late(void);
57226
57227 +#define kmalloc(x, y) \
57228 +({ \
57229 + void *___retval; \
57230 + intoverflow_t ___x = (intoverflow_t)x; \
57231 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
57232 + ___retval = NULL; \
57233 + else \
57234 + ___retval = kmalloc((size_t)___x, (y)); \
57235 + ___retval; \
57236 +})
57237 +
57238 +#define kmalloc_node(x, y, z) \
57239 +({ \
57240 + void *___retval; \
57241 + intoverflow_t ___x = (intoverflow_t)x; \
57242 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
57243 + ___retval = NULL; \
57244 + else \
57245 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
57246 + ___retval; \
57247 +})
57248 +
57249 +#define kzalloc(x, y) \
57250 +({ \
57251 + void *___retval; \
57252 + intoverflow_t ___x = (intoverflow_t)x; \
57253 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
57254 + ___retval = NULL; \
57255 + else \
57256 + ___retval = kzalloc((size_t)___x, (y)); \
57257 + ___retval; \
57258 +})
57259 +
57260 #endif /* _LINUX_SLAB_H */
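The slab.h wrappers above re-route kmalloc()/kmalloc_node()/kzalloc() through a size check: the size argument is first widened to intoverflow_t (defined elsewhere in this patch, wider than size_t on 32-bit builds), and any value above ULONG_MAX produces a WARN and a NULL return instead of a silently truncated allocation. A hedged sketch of the case this guards, assuming a 32-bit kernel and an attacker-influenced 64-bit length; the function name is illustrative:

	#include <linux/slab.h>

	/* On a 32-bit build, len is wider than size_t, so a plain kzalloc(len, ...)
	 * would silently truncate the size.  With the patched wrapper, len is
	 * evaluated as intoverflow_t first; anything above ULONG_MAX hits the
	 * WARN and yields NULL. */
	static void *alloc_for_length(u64 len)
	{
		return kzalloc(len, GFP_KERNEL);
	}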
57261 diff -urNp linux-2.6.32.42/include/linux/slub_def.h linux-2.6.32.42/include/linux/slub_def.h
57262 --- linux-2.6.32.42/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
57263 +++ linux-2.6.32.42/include/linux/slub_def.h 2011-04-17 15:56:46.000000000 -0400
57264 @@ -86,7 +86,7 @@ struct kmem_cache {
57265 struct kmem_cache_order_objects max;
57266 struct kmem_cache_order_objects min;
57267 gfp_t allocflags; /* gfp flags to use on each alloc */
57268 - int refcount; /* Refcount for slab cache destroy */
57269 + atomic_t refcount; /* Refcount for slab cache destroy */
57270 void (*ctor)(void *);
57271 int inuse; /* Offset to metadata */
57272 int align; /* Alignment */
57273 diff -urNp linux-2.6.32.42/include/linux/sonet.h linux-2.6.32.42/include/linux/sonet.h
57274 --- linux-2.6.32.42/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
57275 +++ linux-2.6.32.42/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
57276 @@ -61,7 +61,7 @@ struct sonet_stats {
57277 #include <asm/atomic.h>
57278
57279 struct k_sonet_stats {
57280 -#define __HANDLE_ITEM(i) atomic_t i
57281 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57282 __SONET_ITEMS
57283 #undef __HANDLE_ITEM
57284 };
57285 diff -urNp linux-2.6.32.42/include/linux/sunrpc/clnt.h linux-2.6.32.42/include/linux/sunrpc/clnt.h
57286 --- linux-2.6.32.42/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
57287 +++ linux-2.6.32.42/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
57288 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
57289 {
57290 switch (sap->sa_family) {
57291 case AF_INET:
57292 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
57293 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
57294 case AF_INET6:
57295 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
57296 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
57297 }
57298 return 0;
57299 }
57300 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
57301 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
57302 const struct sockaddr *src)
57303 {
57304 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
57305 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
57306 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
57307
57308 dsin->sin_family = ssin->sin_family;
57309 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
57310 if (sa->sa_family != AF_INET6)
57311 return 0;
57312
57313 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
57314 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
57315 }
57316
57317 #endif /* __KERNEL__ */
57318 diff -urNp linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h
57319 --- linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
57320 +++ linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
57321 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
57322 extern unsigned int svcrdma_max_requests;
57323 extern unsigned int svcrdma_max_req_size;
57324
57325 -extern atomic_t rdma_stat_recv;
57326 -extern atomic_t rdma_stat_read;
57327 -extern atomic_t rdma_stat_write;
57328 -extern atomic_t rdma_stat_sq_starve;
57329 -extern atomic_t rdma_stat_rq_starve;
57330 -extern atomic_t rdma_stat_rq_poll;
57331 -extern atomic_t rdma_stat_rq_prod;
57332 -extern atomic_t rdma_stat_sq_poll;
57333 -extern atomic_t rdma_stat_sq_prod;
57334 +extern atomic_unchecked_t rdma_stat_recv;
57335 +extern atomic_unchecked_t rdma_stat_read;
57336 +extern atomic_unchecked_t rdma_stat_write;
57337 +extern atomic_unchecked_t rdma_stat_sq_starve;
57338 +extern atomic_unchecked_t rdma_stat_rq_starve;
57339 +extern atomic_unchecked_t rdma_stat_rq_poll;
57340 +extern atomic_unchecked_t rdma_stat_rq_prod;
57341 +extern atomic_unchecked_t rdma_stat_sq_poll;
57342 +extern atomic_unchecked_t rdma_stat_sq_prod;
57343
57344 #define RPCRDMA_VERSION 1
57345
57346 diff -urNp linux-2.6.32.42/include/linux/suspend.h linux-2.6.32.42/include/linux/suspend.h
57347 --- linux-2.6.32.42/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
57348 +++ linux-2.6.32.42/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
57349 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
57350 * which require special recovery actions in that situation.
57351 */
57352 struct platform_suspend_ops {
57353 - int (*valid)(suspend_state_t state);
57354 - int (*begin)(suspend_state_t state);
57355 - int (*prepare)(void);
57356 - int (*prepare_late)(void);
57357 - int (*enter)(suspend_state_t state);
57358 - void (*wake)(void);
57359 - void (*finish)(void);
57360 - void (*end)(void);
57361 - void (*recover)(void);
57362 + int (* const valid)(suspend_state_t state);
57363 + int (* const begin)(suspend_state_t state);
57364 + int (* const prepare)(void);
57365 + int (* const prepare_late)(void);
57366 + int (* const enter)(suspend_state_t state);
57367 + void (* const wake)(void);
57368 + void (* const finish)(void);
57369 + void (* const end)(void);
57370 + void (* const recover)(void);
57371 };
57372
57373 #ifdef CONFIG_SUSPEND
57374 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
57375 * suspend_set_ops - set platform dependent suspend operations
57376 * @ops: The new suspend operations to set.
57377 */
57378 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
57379 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
57380 extern int suspend_valid_only_mem(suspend_state_t state);
57381
57382 /**
57383 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
57384 #else /* !CONFIG_SUSPEND */
57385 #define suspend_valid_only_mem NULL
57386
57387 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
57388 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
57389 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
57390 #endif /* !CONFIG_SUSPEND */
57391
57392 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
57393 * platforms which require special recovery actions in that situation.
57394 */
57395 struct platform_hibernation_ops {
57396 - int (*begin)(void);
57397 - void (*end)(void);
57398 - int (*pre_snapshot)(void);
57399 - void (*finish)(void);
57400 - int (*prepare)(void);
57401 - int (*enter)(void);
57402 - void (*leave)(void);
57403 - int (*pre_restore)(void);
57404 - void (*restore_cleanup)(void);
57405 - void (*recover)(void);
57406 + int (* const begin)(void);
57407 + void (* const end)(void);
57408 + int (* const pre_snapshot)(void);
57409 + void (* const finish)(void);
57410 + int (* const prepare)(void);
57411 + int (* const enter)(void);
57412 + void (* const leave)(void);
57413 + int (* const pre_restore)(void);
57414 + void (* const restore_cleanup)(void);
57415 + void (* const recover)(void);
57416 };
57417
57418 #ifdef CONFIG_HIBERNATION
57419 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
57420 extern void swsusp_unset_page_free(struct page *);
57421 extern unsigned long get_safe_page(gfp_t gfp_mask);
57422
57423 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
57424 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
57425 extern int hibernate(void);
57426 extern bool system_entering_hibernation(void);
57427 #else /* CONFIG_HIBERNATION */
57428 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
57429 static inline void swsusp_set_page_free(struct page *p) {}
57430 static inline void swsusp_unset_page_free(struct page *p) {}
57431
57432 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
57433 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
57434 static inline int hibernate(void) { return -ENOSYS; }
57435 static inline bool system_entering_hibernation(void) { return false; }
57436 #endif /* CONFIG_HIBERNATION */
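Several hunks in this region (platform_suspend_ops, platform_hibernation_ops, and later sysfs_ops, neigh_ops and snd_ac97_build_ops) apply one constification pattern: every function-pointer member becomes "* const", so an ops table can only be filled in at its definition and the whole structure can live in read-only data. A hedged sketch of what a provider then looks like; the board name and callbacks are made up, not taken from the patch:

	#include <linux/suspend.h>

	static int board_suspend_valid(suspend_state_t state)
	{
		return state == PM_SUSPEND_MEM;
	}

	static int board_suspend_enter(suspend_state_t state)
	{
		/* platform-specific power-down sequence would go here */
		return 0;
	}

	/* With const members, all callbacks must be supplied here; patching
	 * board_suspend_ops.enter at run time no longer compiles. */
	static const struct platform_suspend_ops board_suspend_ops = {
		.valid	= board_suspend_valid,
		.enter	= board_suspend_enter,
	};

	/* registered once at init time, matching the constified prototype:
	 *	suspend_set_ops(&board_suspend_ops);
	 */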
57437 diff -urNp linux-2.6.32.42/include/linux/sysctl.h linux-2.6.32.42/include/linux/sysctl.h
57438 --- linux-2.6.32.42/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
57439 +++ linux-2.6.32.42/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
57440 @@ -164,7 +164,11 @@ enum
57441 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
57442 };
57443
57444 -
57445 +#ifdef CONFIG_PAX_SOFTMODE
57446 +enum {
57447 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
57448 +};
57449 +#endif
57450
57451 /* CTL_VM names: */
57452 enum
57453 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
57454
57455 extern int proc_dostring(struct ctl_table *, int,
57456 void __user *, size_t *, loff_t *);
57457 +extern int proc_dostring_modpriv(struct ctl_table *, int,
57458 + void __user *, size_t *, loff_t *);
57459 extern int proc_dointvec(struct ctl_table *, int,
57460 void __user *, size_t *, loff_t *);
57461 extern int proc_dointvec_minmax(struct ctl_table *, int,
57462 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
57463
57464 extern ctl_handler sysctl_data;
57465 extern ctl_handler sysctl_string;
57466 +extern ctl_handler sysctl_string_modpriv;
57467 extern ctl_handler sysctl_intvec;
57468 extern ctl_handler sysctl_jiffies;
57469 extern ctl_handler sysctl_ms_jiffies;
57470 diff -urNp linux-2.6.32.42/include/linux/sysfs.h linux-2.6.32.42/include/linux/sysfs.h
57471 --- linux-2.6.32.42/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
57472 +++ linux-2.6.32.42/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
57473 @@ -75,8 +75,8 @@ struct bin_attribute {
57474 };
57475
57476 struct sysfs_ops {
57477 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
57478 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
57479 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
57480 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
57481 };
57482
57483 struct sysfs_dirent;
57484 diff -urNp linux-2.6.32.42/include/linux/thread_info.h linux-2.6.32.42/include/linux/thread_info.h
57485 --- linux-2.6.32.42/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
57486 +++ linux-2.6.32.42/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
57487 @@ -23,7 +23,7 @@ struct restart_block {
57488 };
57489 /* For futex_wait and futex_wait_requeue_pi */
57490 struct {
57491 - u32 *uaddr;
57492 + u32 __user *uaddr;
57493 u32 val;
57494 u32 flags;
57495 u32 bitset;
57496 diff -urNp linux-2.6.32.42/include/linux/tty.h linux-2.6.32.42/include/linux/tty.h
57497 --- linux-2.6.32.42/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
57498 +++ linux-2.6.32.42/include/linux/tty.h 2011-04-17 15:56:46.000000000 -0400
57499 @@ -13,6 +13,7 @@
57500 #include <linux/tty_driver.h>
57501 #include <linux/tty_ldisc.h>
57502 #include <linux/mutex.h>
57503 +#include <linux/poll.h>
57504
57505 #include <asm/system.h>
57506
57507 @@ -443,7 +444,6 @@ extern int tty_perform_flush(struct tty_
57508 extern dev_t tty_devnum(struct tty_struct *tty);
57509 extern void proc_clear_tty(struct task_struct *p);
57510 extern struct tty_struct *get_current_tty(void);
57511 -extern void tty_default_fops(struct file_operations *fops);
57512 extern struct tty_struct *alloc_tty_struct(void);
57513 extern void free_tty_struct(struct tty_struct *tty);
57514 extern void initialize_tty_struct(struct tty_struct *tty,
57515 @@ -493,6 +493,18 @@ extern void tty_ldisc_begin(void);
57516 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
57517 extern void tty_ldisc_enable(struct tty_struct *tty);
57518
57519 +/* tty_io.c */
57520 +extern ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
57521 +extern ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
57522 +extern unsigned int tty_poll(struct file *, poll_table *);
57523 +#ifdef CONFIG_COMPAT
57524 +extern long tty_compat_ioctl(struct file *file, unsigned int cmd,
57525 + unsigned long arg);
57526 +#else
57527 +#define tty_compat_ioctl NULL
57528 +#endif
57529 +extern int tty_release(struct inode *, struct file *);
57530 +extern int tty_fasync(int fd, struct file *filp, int on);
57531
57532 /* n_tty.c */
57533 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
57534 diff -urNp linux-2.6.32.42/include/linux/tty_ldisc.h linux-2.6.32.42/include/linux/tty_ldisc.h
57535 --- linux-2.6.32.42/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
57536 +++ linux-2.6.32.42/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
57537 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
57538
57539 struct module *owner;
57540
57541 - int refcount;
57542 + atomic_t refcount;
57543 };
57544
57545 struct tty_ldisc {
57546 diff -urNp linux-2.6.32.42/include/linux/types.h linux-2.6.32.42/include/linux/types.h
57547 --- linux-2.6.32.42/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
57548 +++ linux-2.6.32.42/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
57549 @@ -191,10 +191,26 @@ typedef struct {
57550 volatile int counter;
57551 } atomic_t;
57552
57553 +#ifdef CONFIG_PAX_REFCOUNT
57554 +typedef struct {
57555 + volatile int counter;
57556 +} atomic_unchecked_t;
57557 +#else
57558 +typedef atomic_t atomic_unchecked_t;
57559 +#endif
57560 +
57561 #ifdef CONFIG_64BIT
57562 typedef struct {
57563 volatile long counter;
57564 } atomic64_t;
57565 +
57566 +#ifdef CONFIG_PAX_REFCOUNT
57567 +typedef struct {
57568 + volatile long counter;
57569 +} atomic64_unchecked_t;
57570 +#else
57571 +typedef atomic64_t atomic64_unchecked_t;
57572 +#endif
57573 #endif
57574
57575 struct ustat {
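The typedefs above are the opt-out half of PAX_REFCOUNT: with that option enabled, the ordinary atomic_t operations gain overflow detection (pax_report_refcount_overflow() in the sched.h hunk is the reporting side), so counters that are pure statistics and are allowed to wrap, such as the slab hit/miss counters, the svc_rdma stats and the vm_stat conversions in this section, are retyped to atomic_unchecked_t. A small hedged sketch of the distinction; the struct and field names are illustrative, and the *_unchecked() helpers are assumed from the arch atomic.h changes elsewhere in this patch:

	#include <linux/types.h>
	#include <asm/atomic.h>

	struct widget {
		atomic_t		refcnt;		/* lifetime count: overflow is a bug,
							 * so the checked type reports it */
		atomic_unchecked_t	rx_packets;	/* statistic: free to wrap, so it
							 * uses the unchecked type */
	};

	static void widget_account_rx(struct widget *w)
	{
		atomic_inc(&w->refcnt);			/* checked increment */
		atomic_inc_unchecked(&w->rx_packets);	/* unchecked increment */
		atomic_dec(&w->refcnt);
	}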
57576 diff -urNp linux-2.6.32.42/include/linux/uaccess.h linux-2.6.32.42/include/linux/uaccess.h
57577 --- linux-2.6.32.42/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
57578 +++ linux-2.6.32.42/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
57579 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
57580 long ret; \
57581 mm_segment_t old_fs = get_fs(); \
57582 \
57583 - set_fs(KERNEL_DS); \
57584 pagefault_disable(); \
57585 + set_fs(KERNEL_DS); \
57586 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
57587 - pagefault_enable(); \
57588 set_fs(old_fs); \
57589 + pagefault_enable(); \
57590 ret; \
57591 })
57592
57593 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
57594 * Safely read from address @src to the buffer at @dst. If a kernel fault
57595 * happens, handle that and return -EFAULT.
57596 */
57597 -extern long probe_kernel_read(void *dst, void *src, size_t size);
57598 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
57599
57600 /*
57601 * probe_kernel_write(): safely attempt to write to a location
57602 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
57603 * Safely write to address @dst from the buffer at @src. If a kernel fault
57604 * happens, handle that and return -EFAULT.
57605 */
57606 -extern long probe_kernel_write(void *dst, void *src, size_t size);
57607 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
57608
57609 #endif /* __LINUX_UACCESS_H__ */
57610 diff -urNp linux-2.6.32.42/include/linux/unaligned/access_ok.h linux-2.6.32.42/include/linux/unaligned/access_ok.h
57611 --- linux-2.6.32.42/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
57612 +++ linux-2.6.32.42/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
57613 @@ -6,32 +6,32 @@
57614
57615 static inline u16 get_unaligned_le16(const void *p)
57616 {
57617 - return le16_to_cpup((__le16 *)p);
57618 + return le16_to_cpup((const __le16 *)p);
57619 }
57620
57621 static inline u32 get_unaligned_le32(const void *p)
57622 {
57623 - return le32_to_cpup((__le32 *)p);
57624 + return le32_to_cpup((const __le32 *)p);
57625 }
57626
57627 static inline u64 get_unaligned_le64(const void *p)
57628 {
57629 - return le64_to_cpup((__le64 *)p);
57630 + return le64_to_cpup((const __le64 *)p);
57631 }
57632
57633 static inline u16 get_unaligned_be16(const void *p)
57634 {
57635 - return be16_to_cpup((__be16 *)p);
57636 + return be16_to_cpup((const __be16 *)p);
57637 }
57638
57639 static inline u32 get_unaligned_be32(const void *p)
57640 {
57641 - return be32_to_cpup((__be32 *)p);
57642 + return be32_to_cpup((const __be32 *)p);
57643 }
57644
57645 static inline u64 get_unaligned_be64(const void *p)
57646 {
57647 - return be64_to_cpup((__be64 *)p);
57648 + return be64_to_cpup((const __be64 *)p);
57649 }
57650
57651 static inline void put_unaligned_le16(u16 val, void *p)
57652 diff -urNp linux-2.6.32.42/include/linux/vmalloc.h linux-2.6.32.42/include/linux/vmalloc.h
57653 --- linux-2.6.32.42/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
57654 +++ linux-2.6.32.42/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
57655 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
57656 #define VM_MAP 0x00000004 /* vmap()ed pages */
57657 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
57658 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
57659 +
57660 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
57661 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
57662 +#endif
57663 +
57664 /* bits [20..32] reserved for arch specific ioremap internals */
57665
57666 /*
57667 @@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
57668
57669 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
57670
57671 +#define vmalloc(x) \
57672 +({ \
57673 + void *___retval; \
57674 + intoverflow_t ___x = (intoverflow_t)x; \
57675 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
57676 + ___retval = NULL; \
57677 + else \
57678 + ___retval = vmalloc((unsigned long)___x); \
57679 + ___retval; \
57680 +})
57681 +
57682 +#define __vmalloc(x, y, z) \
57683 +({ \
57684 + void *___retval; \
57685 + intoverflow_t ___x = (intoverflow_t)x; \
57686 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
57687 + ___retval = NULL; \
57688 + else \
57689 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
57690 + ___retval; \
57691 +})
57692 +
57693 +#define vmalloc_user(x) \
57694 +({ \
57695 + void *___retval; \
57696 + intoverflow_t ___x = (intoverflow_t)x; \
57697 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
57698 + ___retval = NULL; \
57699 + else \
57700 + ___retval = vmalloc_user((unsigned long)___x); \
57701 + ___retval; \
57702 +})
57703 +
57704 +#define vmalloc_exec(x) \
57705 +({ \
57706 + void *___retval; \
57707 + intoverflow_t ___x = (intoverflow_t)x; \
57708 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
57709 + ___retval = NULL; \
57710 + else \
57711 + ___retval = vmalloc_exec((unsigned long)___x); \
57712 + ___retval; \
57713 +})
57714 +
57715 +#define vmalloc_node(x, y) \
57716 +({ \
57717 + void *___retval; \
57718 + intoverflow_t ___x = (intoverflow_t)x; \
57719 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
57720 + ___retval = NULL; \
57721 + else \
57722 + ___retval = vmalloc_node((unsigned long)___x, (y));\
57723 + ___retval; \
57724 +})
57725 +
57726 +#define vmalloc_32(x) \
57727 +({ \
57728 + void *___retval; \
57729 + intoverflow_t ___x = (intoverflow_t)x; \
57730 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
57731 + ___retval = NULL; \
57732 + else \
57733 + ___retval = vmalloc_32((unsigned long)___x); \
57734 + ___retval; \
57735 +})
57736 +
57737 +#define vmalloc_32_user(x) \
57738 +({ \
57739 + void *___retval; \
57740 + intoverflow_t ___x = (intoverflow_t)x; \
57741 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
57742 + ___retval = NULL; \
57743 + else \
57744 + ___retval = vmalloc_32_user((unsigned long)___x);\
57745 + ___retval; \
57746 +})
57747 +
57748 #endif /* _LINUX_VMALLOC_H */
57749 diff -urNp linux-2.6.32.42/include/linux/vmstat.h linux-2.6.32.42/include/linux/vmstat.h
57750 --- linux-2.6.32.42/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
57751 +++ linux-2.6.32.42/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
57752 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
57753 /*
57754 * Zone based page accounting with per cpu differentials.
57755 */
57756 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57757 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57758
57759 static inline void zone_page_state_add(long x, struct zone *zone,
57760 enum zone_stat_item item)
57761 {
57762 - atomic_long_add(x, &zone->vm_stat[item]);
57763 - atomic_long_add(x, &vm_stat[item]);
57764 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
57765 + atomic_long_add_unchecked(x, &vm_stat[item]);
57766 }
57767
57768 static inline unsigned long global_page_state(enum zone_stat_item item)
57769 {
57770 - long x = atomic_long_read(&vm_stat[item]);
57771 + long x = atomic_long_read_unchecked(&vm_stat[item]);
57772 #ifdef CONFIG_SMP
57773 if (x < 0)
57774 x = 0;
57775 @@ -158,7 +158,7 @@ static inline unsigned long global_page_
57776 static inline unsigned long zone_page_state(struct zone *zone,
57777 enum zone_stat_item item)
57778 {
57779 - long x = atomic_long_read(&zone->vm_stat[item]);
57780 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57781 #ifdef CONFIG_SMP
57782 if (x < 0)
57783 x = 0;
57784 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
57785 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
57786 enum zone_stat_item item)
57787 {
57788 - long x = atomic_long_read(&zone->vm_stat[item]);
57789 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57790
57791 #ifdef CONFIG_SMP
57792 int cpu;
57793 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
57794
57795 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
57796 {
57797 - atomic_long_inc(&zone->vm_stat[item]);
57798 - atomic_long_inc(&vm_stat[item]);
57799 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
57800 + atomic_long_inc_unchecked(&vm_stat[item]);
57801 }
57802
57803 static inline void __inc_zone_page_state(struct page *page,
57804 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
57805
57806 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
57807 {
57808 - atomic_long_dec(&zone->vm_stat[item]);
57809 - atomic_long_dec(&vm_stat[item]);
57810 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
57811 + atomic_long_dec_unchecked(&vm_stat[item]);
57812 }
57813
57814 static inline void __dec_zone_page_state(struct page *page,
57815 diff -urNp linux-2.6.32.42/include/media/v4l2-device.h linux-2.6.32.42/include/media/v4l2-device.h
57816 --- linux-2.6.32.42/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
57817 +++ linux-2.6.32.42/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
57818 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
57819 this function returns 0. If the name ends with a digit (e.g. cx18),
57820 then the name will be set to cx18-0 since cx180 looks really odd. */
57821 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
57822 - atomic_t *instance);
57823 + atomic_unchecked_t *instance);
57824
57825 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
57826 Since the parent disappears this ensures that v4l2_dev doesn't have an
57827 diff -urNp linux-2.6.32.42/include/net/flow.h linux-2.6.32.42/include/net/flow.h
57828 --- linux-2.6.32.42/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
57829 +++ linux-2.6.32.42/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
57830 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
57831 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
57832 u8 dir, flow_resolve_t resolver);
57833 extern void flow_cache_flush(void);
57834 -extern atomic_t flow_cache_genid;
57835 +extern atomic_unchecked_t flow_cache_genid;
57836
57837 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
57838 {
57839 diff -urNp linux-2.6.32.42/include/net/inetpeer.h linux-2.6.32.42/include/net/inetpeer.h
57840 --- linux-2.6.32.42/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
57841 +++ linux-2.6.32.42/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
57842 @@ -24,7 +24,7 @@ struct inet_peer
57843 __u32 dtime; /* the time of last use of not
57844 * referenced entries */
57845 atomic_t refcnt;
57846 - atomic_t rid; /* Frag reception counter */
57847 + atomic_unchecked_t rid; /* Frag reception counter */
57848 __u32 tcp_ts;
57849 unsigned long tcp_ts_stamp;
57850 };
57851 diff -urNp linux-2.6.32.42/include/net/ip_vs.h linux-2.6.32.42/include/net/ip_vs.h
57852 --- linux-2.6.32.42/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
57853 +++ linux-2.6.32.42/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
57854 @@ -365,7 +365,7 @@ struct ip_vs_conn {
57855 struct ip_vs_conn *control; /* Master control connection */
57856 atomic_t n_control; /* Number of controlled ones */
57857 struct ip_vs_dest *dest; /* real server */
57858 - atomic_t in_pkts; /* incoming packet counter */
57859 + atomic_unchecked_t in_pkts; /* incoming packet counter */
57860
57861 /* packet transmitter for different forwarding methods. If it
57862 mangles the packet, it must return NF_DROP or better NF_STOLEN,
57863 @@ -466,7 +466,7 @@ struct ip_vs_dest {
57864 union nf_inet_addr addr; /* IP address of the server */
57865 __be16 port; /* port number of the server */
57866 volatile unsigned flags; /* dest status flags */
57867 - atomic_t conn_flags; /* flags to copy to conn */
57868 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
57869 atomic_t weight; /* server weight */
57870
57871 atomic_t refcnt; /* reference counter */
57872 diff -urNp linux-2.6.32.42/include/net/irda/ircomm_tty.h linux-2.6.32.42/include/net/irda/ircomm_tty.h
57873 --- linux-2.6.32.42/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
57874 +++ linux-2.6.32.42/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
57875 @@ -35,6 +35,7 @@
57876 #include <linux/termios.h>
57877 #include <linux/timer.h>
57878 #include <linux/tty.h> /* struct tty_struct */
57879 +#include <asm/local.h>
57880
57881 #include <net/irda/irias_object.h>
57882 #include <net/irda/ircomm_core.h>
57883 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
57884 unsigned short close_delay;
57885 unsigned short closing_wait; /* time to wait before closing */
57886
57887 - int open_count;
57888 - int blocked_open; /* # of blocked opens */
57889 + local_t open_count;
57890 + local_t blocked_open; /* # of blocked opens */
57891
57892 /* Protect concurent access to :
57893 * o self->open_count
57894 diff -urNp linux-2.6.32.42/include/net/iucv/af_iucv.h linux-2.6.32.42/include/net/iucv/af_iucv.h
57895 --- linux-2.6.32.42/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
57896 +++ linux-2.6.32.42/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
57897 @@ -87,7 +87,7 @@ struct iucv_sock {
57898 struct iucv_sock_list {
57899 struct hlist_head head;
57900 rwlock_t lock;
57901 - atomic_t autobind_name;
57902 + atomic_unchecked_t autobind_name;
57903 };
57904
57905 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
57906 diff -urNp linux-2.6.32.42/include/net/neighbour.h linux-2.6.32.42/include/net/neighbour.h
57907 --- linux-2.6.32.42/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
57908 +++ linux-2.6.32.42/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
57909 @@ -125,12 +125,12 @@ struct neighbour
57910 struct neigh_ops
57911 {
57912 int family;
57913 - void (*solicit)(struct neighbour *, struct sk_buff*);
57914 - void (*error_report)(struct neighbour *, struct sk_buff*);
57915 - int (*output)(struct sk_buff*);
57916 - int (*connected_output)(struct sk_buff*);
57917 - int (*hh_output)(struct sk_buff*);
57918 - int (*queue_xmit)(struct sk_buff*);
57919 + void (* const solicit)(struct neighbour *, struct sk_buff*);
57920 + void (* const error_report)(struct neighbour *, struct sk_buff*);
57921 + int (* const output)(struct sk_buff*);
57922 + int (* const connected_output)(struct sk_buff*);
57923 + int (* const hh_output)(struct sk_buff*);
57924 + int (* const queue_xmit)(struct sk_buff*);
57925 };
57926
57927 struct pneigh_entry
57928 diff -urNp linux-2.6.32.42/include/net/netlink.h linux-2.6.32.42/include/net/netlink.h
57929 --- linux-2.6.32.42/include/net/netlink.h 2011-03-27 14:31:47.000000000 -0400
57930 +++ linux-2.6.32.42/include/net/netlink.h 2011-04-17 15:56:46.000000000 -0400
57931 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
57932 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
57933 {
57934 if (mark)
57935 - skb_trim(skb, (unsigned char *) mark - skb->data);
57936 + skb_trim(skb, (const unsigned char *) mark - skb->data);
57937 }
57938
57939 /**
57940 diff -urNp linux-2.6.32.42/include/net/netns/ipv4.h linux-2.6.32.42/include/net/netns/ipv4.h
57941 --- linux-2.6.32.42/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
57942 +++ linux-2.6.32.42/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
57943 @@ -54,7 +54,7 @@ struct netns_ipv4 {
57944 int current_rt_cache_rebuild_count;
57945
57946 struct timer_list rt_secret_timer;
57947 - atomic_t rt_genid;
57948 + atomic_unchecked_t rt_genid;
57949
57950 #ifdef CONFIG_IP_MROUTE
57951 struct sock *mroute_sk;
57952 diff -urNp linux-2.6.32.42/include/net/sctp/sctp.h linux-2.6.32.42/include/net/sctp/sctp.h
57953 --- linux-2.6.32.42/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
57954 +++ linux-2.6.32.42/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
57955 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
57956
57957 #else /* SCTP_DEBUG */
57958
57959 -#define SCTP_DEBUG_PRINTK(whatever...)
57960 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
57961 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
57962 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
57963 #define SCTP_ENABLE_DEBUG
57964 #define SCTP_DISABLE_DEBUG
57965 #define SCTP_ASSERT(expr, str, func)
57966 diff -urNp linux-2.6.32.42/include/net/sock.h linux-2.6.32.42/include/net/sock.h
57967 --- linux-2.6.32.42/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
57968 +++ linux-2.6.32.42/include/net/sock.h 2011-05-04 17:56:28.000000000 -0400
57969 @@ -272,7 +272,7 @@ struct sock {
57970 rwlock_t sk_callback_lock;
57971 int sk_err,
57972 sk_err_soft;
57973 - atomic_t sk_drops;
57974 + atomic_unchecked_t sk_drops;
57975 unsigned short sk_ack_backlog;
57976 unsigned short sk_max_ack_backlog;
57977 __u32 sk_priority;
57978 diff -urNp linux-2.6.32.42/include/net/tcp.h linux-2.6.32.42/include/net/tcp.h
57979 --- linux-2.6.32.42/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
57980 +++ linux-2.6.32.42/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
57981 @@ -1444,6 +1444,7 @@ enum tcp_seq_states {
57982 struct tcp_seq_afinfo {
57983 char *name;
57984 sa_family_t family;
57985 + /* cannot be const */
57986 struct file_operations seq_fops;
57987 struct seq_operations seq_ops;
57988 };
57989 diff -urNp linux-2.6.32.42/include/net/udp.h linux-2.6.32.42/include/net/udp.h
57990 --- linux-2.6.32.42/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
57991 +++ linux-2.6.32.42/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
57992 @@ -187,6 +187,7 @@ struct udp_seq_afinfo {
57993 char *name;
57994 sa_family_t family;
57995 struct udp_table *udp_table;
57996 + /* cannot be const */
57997 struct file_operations seq_fops;
57998 struct seq_operations seq_ops;
57999 };
58000 diff -urNp linux-2.6.32.42/include/scsi/scsi_device.h linux-2.6.32.42/include/scsi/scsi_device.h
58001 --- linux-2.6.32.42/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
58002 +++ linux-2.6.32.42/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
58003 @@ -156,9 +156,9 @@ struct scsi_device {
58004 unsigned int max_device_blocked; /* what device_blocked counts down from */
58005 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
58006
58007 - atomic_t iorequest_cnt;
58008 - atomic_t iodone_cnt;
58009 - atomic_t ioerr_cnt;
58010 + atomic_unchecked_t iorequest_cnt;
58011 + atomic_unchecked_t iodone_cnt;
58012 + atomic_unchecked_t ioerr_cnt;
58013
58014 struct device sdev_gendev,
58015 sdev_dev;
58016 diff -urNp linux-2.6.32.42/include/sound/ac97_codec.h linux-2.6.32.42/include/sound/ac97_codec.h
58017 --- linux-2.6.32.42/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
58018 +++ linux-2.6.32.42/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
58019 @@ -419,15 +419,15 @@
58020 struct snd_ac97;
58021
58022 struct snd_ac97_build_ops {
58023 - int (*build_3d) (struct snd_ac97 *ac97);
58024 - int (*build_specific) (struct snd_ac97 *ac97);
58025 - int (*build_spdif) (struct snd_ac97 *ac97);
58026 - int (*build_post_spdif) (struct snd_ac97 *ac97);
58027 + int (* const build_3d) (struct snd_ac97 *ac97);
58028 + int (* const build_specific) (struct snd_ac97 *ac97);
58029 + int (* const build_spdif) (struct snd_ac97 *ac97);
58030 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
58031 #ifdef CONFIG_PM
58032 - void (*suspend) (struct snd_ac97 *ac97);
58033 - void (*resume) (struct snd_ac97 *ac97);
58034 + void (* const suspend) (struct snd_ac97 *ac97);
58035 + void (* const resume) (struct snd_ac97 *ac97);
58036 #endif
58037 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
58038 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
58039 };
58040
58041 struct snd_ac97_bus_ops {
58042 @@ -477,7 +477,7 @@ struct snd_ac97_template {
58043
58044 struct snd_ac97 {
58045 /* -- lowlevel (hardware) driver specific -- */
58046 - struct snd_ac97_build_ops * build_ops;
58047 + const struct snd_ac97_build_ops * build_ops;
58048 void *private_data;
58049 void (*private_free) (struct snd_ac97 *ac97);
58050 /* --- */
58051 diff -urNp linux-2.6.32.42/include/sound/ymfpci.h linux-2.6.32.42/include/sound/ymfpci.h
58052 --- linux-2.6.32.42/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
58053 +++ linux-2.6.32.42/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
58054 @@ -358,7 +358,7 @@ struct snd_ymfpci {
58055 spinlock_t reg_lock;
58056 spinlock_t voice_lock;
58057 wait_queue_head_t interrupt_sleep;
58058 - atomic_t interrupt_sleep_count;
58059 + atomic_unchecked_t interrupt_sleep_count;
58060 struct snd_info_entry *proc_entry;
58061 const struct firmware *dsp_microcode;
58062 const struct firmware *controller_microcode;
58063 diff -urNp linux-2.6.32.42/include/trace/events/irq.h linux-2.6.32.42/include/trace/events/irq.h
58064 --- linux-2.6.32.42/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
58065 +++ linux-2.6.32.42/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
58066 @@ -34,7 +34,7 @@
58067 */
58068 TRACE_EVENT(irq_handler_entry,
58069
58070 - TP_PROTO(int irq, struct irqaction *action),
58071 + TP_PROTO(int irq, const struct irqaction *action),
58072
58073 TP_ARGS(irq, action),
58074
58075 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
58076 */
58077 TRACE_EVENT(irq_handler_exit,
58078
58079 - TP_PROTO(int irq, struct irqaction *action, int ret),
58080 + TP_PROTO(int irq, const struct irqaction *action, int ret),
58081
58082 TP_ARGS(irq, action, ret),
58083
58084 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
58085 */
58086 TRACE_EVENT(softirq_entry,
58087
58088 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
58089 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
58090
58091 TP_ARGS(h, vec),
58092
58093 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
58094 */
58095 TRACE_EVENT(softirq_exit,
58096
58097 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
58098 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
58099
58100 TP_ARGS(h, vec),
58101
58102 diff -urNp linux-2.6.32.42/include/video/uvesafb.h linux-2.6.32.42/include/video/uvesafb.h
58103 --- linux-2.6.32.42/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
58104 +++ linux-2.6.32.42/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
58105 @@ -177,6 +177,7 @@ struct uvesafb_par {
58106 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
58107 u8 pmi_setpal; /* PMI for palette changes */
58108 u16 *pmi_base; /* protected mode interface location */
58109 + u8 *pmi_code; /* protected mode code location */
58110 void *pmi_start;
58111 void *pmi_pal;
58112 u8 *vbe_state_orig; /*
58113 diff -urNp linux-2.6.32.42/init/do_mounts.c linux-2.6.32.42/init/do_mounts.c
58114 --- linux-2.6.32.42/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
58115 +++ linux-2.6.32.42/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
58116 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
58117
58118 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
58119 {
58120 - int err = sys_mount(name, "/root", fs, flags, data);
58121 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
58122 if (err)
58123 return err;
58124
58125 - sys_chdir("/root");
58126 + sys_chdir((__force const char __user *)"/root");
58127 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
58128 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
58129 current->fs->pwd.mnt->mnt_sb->s_type->name,
58130 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
58131 va_start(args, fmt);
58132 vsprintf(buf, fmt, args);
58133 va_end(args);
58134 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
58135 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
58136 if (fd >= 0) {
58137 sys_ioctl(fd, FDEJECT, 0);
58138 sys_close(fd);
58139 }
58140 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
58141 - fd = sys_open("/dev/console", O_RDWR, 0);
58142 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
58143 if (fd >= 0) {
58144 sys_ioctl(fd, TCGETS, (long)&termios);
58145 termios.c_lflag &= ~ICANON;
58146 sys_ioctl(fd, TCSETSF, (long)&termios);
58147 - sys_read(fd, &c, 1);
58148 + sys_read(fd, (char __user *)&c, 1);
58149 termios.c_lflag |= ICANON;
58150 sys_ioctl(fd, TCSETSF, (long)&termios);
58151 sys_close(fd);
58152 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
58153 mount_root();
58154 out:
58155 devtmpfs_mount("dev");
58156 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58157 - sys_chroot(".");
58158 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58159 + sys_chroot((__force char __user *)".");
58160 }
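The do_mounts.c changes, like the do_mounts.h, do_mounts_initrd.c, do_mounts_md.c and initramfs.c ones that follow, are one mechanical fix: early-boot code calls sys_open()/sys_mount()/sys_unlink() and friends directly with kernel string literals, and with the stricter __user annotations elsewhere in this patch sparse flags every such argument, so each call site gains an explicit (__force ... __user *) cast to mark the address-space crossing as deliberate. A hedged sketch of the pattern; the helper below is illustrative and not part of the patch:

	#include <linux/syscalls.h>
	#include <linux/fcntl.h>
	#include <linux/compiler.h>

	/* Illustrative early-init helper: the path lives in kernel space, so the
	 * __force __user cast documents the intentional mismatch; at this point
	 * in boot the address limit still covers kernel addresses. */
	static long __init open_console_for_init(void)
	{
		return sys_open((__force const char __user *)"/dev/console",
				O_RDWR, 0);
	}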
58161 diff -urNp linux-2.6.32.42/init/do_mounts.h linux-2.6.32.42/init/do_mounts.h
58162 --- linux-2.6.32.42/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
58163 +++ linux-2.6.32.42/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
58164 @@ -15,15 +15,15 @@ extern int root_mountflags;
58165
58166 static inline int create_dev(char *name, dev_t dev)
58167 {
58168 - sys_unlink(name);
58169 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
58170 + sys_unlink((__force char __user *)name);
58171 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
58172 }
58173
58174 #if BITS_PER_LONG == 32
58175 static inline u32 bstat(char *name)
58176 {
58177 struct stat64 stat;
58178 - if (sys_stat64(name, &stat) != 0)
58179 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
58180 return 0;
58181 if (!S_ISBLK(stat.st_mode))
58182 return 0;
58183 diff -urNp linux-2.6.32.42/init/do_mounts_initrd.c linux-2.6.32.42/init/do_mounts_initrd.c
58184 --- linux-2.6.32.42/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
58185 +++ linux-2.6.32.42/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
58186 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
58187 sys_close(old_fd);sys_close(root_fd);
58188 sys_close(0);sys_close(1);sys_close(2);
58189 sys_setsid();
58190 - (void) sys_open("/dev/console",O_RDWR,0);
58191 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
58192 (void) sys_dup(0);
58193 (void) sys_dup(0);
58194 return kernel_execve(shell, argv, envp_init);
58195 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
58196 create_dev("/dev/root.old", Root_RAM0);
58197 /* mount initrd on rootfs' /root */
58198 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
58199 - sys_mkdir("/old", 0700);
58200 - root_fd = sys_open("/", 0, 0);
58201 - old_fd = sys_open("/old", 0, 0);
58202 + sys_mkdir((__force const char __user *)"/old", 0700);
58203 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
58204 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
58205 /* move initrd over / and chdir/chroot in initrd root */
58206 - sys_chdir("/root");
58207 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58208 - sys_chroot(".");
58209 + sys_chdir((__force const char __user *)"/root");
58210 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58211 + sys_chroot((__force const char __user *)".");
58212
58213 /*
58214 * In case that a resume from disk is carried out by linuxrc or one of
58215 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
58216
58217 /* move initrd to rootfs' /old */
58218 sys_fchdir(old_fd);
58219 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
58220 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
58221 /* switch root and cwd back to / of rootfs */
58222 sys_fchdir(root_fd);
58223 - sys_chroot(".");
58224 + sys_chroot((__force const char __user *)".");
58225 sys_close(old_fd);
58226 sys_close(root_fd);
58227
58228 if (new_decode_dev(real_root_dev) == Root_RAM0) {
58229 - sys_chdir("/old");
58230 + sys_chdir((__force const char __user *)"/old");
58231 return;
58232 }
58233
58234 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
58235 mount_root();
58236
58237 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
58238 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
58239 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
58240 if (!error)
58241 printk("okay\n");
58242 else {
58243 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
58244 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
58245 if (error == -ENOENT)
58246 printk("/initrd does not exist. Ignored.\n");
58247 else
58248 printk("failed\n");
58249 printk(KERN_NOTICE "Unmounting old root\n");
58250 - sys_umount("/old", MNT_DETACH);
58251 + sys_umount((__force char __user *)"/old", MNT_DETACH);
58252 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
58253 if (fd < 0) {
58254 error = fd;
58255 @@ -119,11 +119,11 @@ int __init initrd_load(void)
58256 * mounted in the normal path.
58257 */
58258 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
58259 - sys_unlink("/initrd.image");
58260 + sys_unlink((__force const char __user *)"/initrd.image");
58261 handle_initrd();
58262 return 1;
58263 }
58264 }
58265 - sys_unlink("/initrd.image");
58266 + sys_unlink((__force const char __user *)"/initrd.image");
58267 return 0;
58268 }
58269 diff -urNp linux-2.6.32.42/init/do_mounts_md.c linux-2.6.32.42/init/do_mounts_md.c
58270 --- linux-2.6.32.42/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
58271 +++ linux-2.6.32.42/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
58272 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
58273 partitioned ? "_d" : "", minor,
58274 md_setup_args[ent].device_names);
58275
58276 - fd = sys_open(name, 0, 0);
58277 + fd = sys_open((__force char __user *)name, 0, 0);
58278 if (fd < 0) {
58279 printk(KERN_ERR "md: open failed - cannot start "
58280 "array %s\n", name);
58281 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
58282 * array without it
58283 */
58284 sys_close(fd);
58285 - fd = sys_open(name, 0, 0);
58286 + fd = sys_open((__force char __user *)name, 0, 0);
58287 sys_ioctl(fd, BLKRRPART, 0);
58288 }
58289 sys_close(fd);
58290 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
58291
58292 wait_for_device_probe();
58293
58294 - fd = sys_open("/dev/md0", 0, 0);
58295 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
58296 if (fd >= 0) {
58297 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
58298 sys_close(fd);
58299 diff -urNp linux-2.6.32.42/init/initramfs.c linux-2.6.32.42/init/initramfs.c
58300 --- linux-2.6.32.42/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
58301 +++ linux-2.6.32.42/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
58302 @@ -74,7 +74,7 @@ static void __init free_hash(void)
58303 }
58304 }
58305
58306 -static long __init do_utime(char __user *filename, time_t mtime)
58307 +static long __init do_utime(__force char __user *filename, time_t mtime)
58308 {
58309 struct timespec t[2];
58310
58311 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
58312 struct dir_entry *de, *tmp;
58313 list_for_each_entry_safe(de, tmp, &dir_list, list) {
58314 list_del(&de->list);
58315 - do_utime(de->name, de->mtime);
58316 + do_utime((__force char __user *)de->name, de->mtime);
58317 kfree(de->name);
58318 kfree(de);
58319 }
58320 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
58321 if (nlink >= 2) {
58322 char *old = find_link(major, minor, ino, mode, collected);
58323 if (old)
58324 - return (sys_link(old, collected) < 0) ? -1 : 1;
58325 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
58326 }
58327 return 0;
58328 }
58329 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
58330 {
58331 struct stat st;
58332
58333 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
58334 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
58335 if (S_ISDIR(st.st_mode))
58336 - sys_rmdir(path);
58337 + sys_rmdir((__force char __user *)path);
58338 else
58339 - sys_unlink(path);
58340 + sys_unlink((__force char __user *)path);
58341 }
58342 }
58343
58344 @@ -305,7 +305,7 @@ static int __init do_name(void)
58345 int openflags = O_WRONLY|O_CREAT;
58346 if (ml != 1)
58347 openflags |= O_TRUNC;
58348 - wfd = sys_open(collected, openflags, mode);
58349 + wfd = sys_open((__force char __user *)collected, openflags, mode);
58350
58351 if (wfd >= 0) {
58352 sys_fchown(wfd, uid, gid);
58353 @@ -317,17 +317,17 @@ static int __init do_name(void)
58354 }
58355 }
58356 } else if (S_ISDIR(mode)) {
58357 - sys_mkdir(collected, mode);
58358 - sys_chown(collected, uid, gid);
58359 - sys_chmod(collected, mode);
58360 + sys_mkdir((__force char __user *)collected, mode);
58361 + sys_chown((__force char __user *)collected, uid, gid);
58362 + sys_chmod((__force char __user *)collected, mode);
58363 dir_add(collected, mtime);
58364 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
58365 S_ISFIFO(mode) || S_ISSOCK(mode)) {
58366 if (maybe_link() == 0) {
58367 - sys_mknod(collected, mode, rdev);
58368 - sys_chown(collected, uid, gid);
58369 - sys_chmod(collected, mode);
58370 - do_utime(collected, mtime);
58371 + sys_mknod((__force char __user *)collected, mode, rdev);
58372 + sys_chown((__force char __user *)collected, uid, gid);
58373 + sys_chmod((__force char __user *)collected, mode);
58374 + do_utime((__force char __user *)collected, mtime);
58375 }
58376 }
58377 return 0;
58378 @@ -336,15 +336,15 @@ static int __init do_name(void)
58379 static int __init do_copy(void)
58380 {
58381 if (count >= body_len) {
58382 - sys_write(wfd, victim, body_len);
58383 + sys_write(wfd, (__force char __user *)victim, body_len);
58384 sys_close(wfd);
58385 - do_utime(vcollected, mtime);
58386 + do_utime((__force char __user *)vcollected, mtime);
58387 kfree(vcollected);
58388 eat(body_len);
58389 state = SkipIt;
58390 return 0;
58391 } else {
58392 - sys_write(wfd, victim, count);
58393 + sys_write(wfd, (__force char __user *)victim, count);
58394 body_len -= count;
58395 eat(count);
58396 return 1;
58397 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
58398 {
58399 collected[N_ALIGN(name_len) + body_len] = '\0';
58400 clean_path(collected, 0);
58401 - sys_symlink(collected + N_ALIGN(name_len), collected);
58402 - sys_lchown(collected, uid, gid);
58403 - do_utime(collected, mtime);
58404 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
58405 + sys_lchown((__force char __user *)collected, uid, gid);
58406 + do_utime((__force char __user *)collected, mtime);
58407 state = SkipIt;
58408 next_state = Reset;
58409 return 0;
58410 diff -urNp linux-2.6.32.42/init/Kconfig linux-2.6.32.42/init/Kconfig
58411 --- linux-2.6.32.42/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
58412 +++ linux-2.6.32.42/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
58413 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
58414
58415 config COMPAT_BRK
58416 bool "Disable heap randomization"
58417 - default y
58418 + default n
58419 help
58420 Randomizing heap placement makes heap exploits harder, but it
58421 also breaks ancient binaries (including anything libc5 based).
58422 diff -urNp linux-2.6.32.42/init/main.c linux-2.6.32.42/init/main.c
58423 --- linux-2.6.32.42/init/main.c 2011-05-10 22:12:01.000000000 -0400
58424 +++ linux-2.6.32.42/init/main.c 2011-05-22 23:02:06.000000000 -0400
58425 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
58426 #ifdef CONFIG_TC
58427 extern void tc_init(void);
58428 #endif
58429 +extern void grsecurity_init(void);
58430
58431 enum system_states system_state __read_mostly;
58432 EXPORT_SYMBOL(system_state);
58433 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char
58434
58435 __setup("reset_devices", set_reset_devices);
58436
58437 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
58438 +extern char pax_enter_kernel_user[];
58439 +extern char pax_exit_kernel_user[];
58440 +extern pgdval_t clone_pgd_mask;
58441 +#endif
58442 +
58443 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
58444 +static int __init setup_pax_nouderef(char *str)
58445 +{
58446 +#ifdef CONFIG_X86_32
58447 + unsigned int cpu;
58448 + struct desc_struct *gdt;
58449 +
58450 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
58451 + gdt = get_cpu_gdt_table(cpu);
58452 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
58453 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
58454 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
58455 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
58456 + }
58457 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
58458 +#else
58459 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
58460 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
58461 + clone_pgd_mask = ~(pgdval_t)0UL;
58462 +#endif
58463 +
58464 + return 0;
58465 +}
58466 +early_param("pax_nouderef", setup_pax_nouderef);
58467 +#endif
58468 +
58469 +#ifdef CONFIG_PAX_SOFTMODE
58470 +unsigned int pax_softmode;
58471 +
58472 +static int __init setup_pax_softmode(char *str)
58473 +{
58474 + get_option(&str, &pax_softmode);
58475 + return 1;
58476 +}
58477 +__setup("pax_softmode=", setup_pax_softmode);
58478 +#endif
58479 +
58480 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
58481 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
58482 static const char *panic_later, *panic_param;
58483 @@ -705,52 +749,53 @@ int initcall_debug;
58484 core_param(initcall_debug, initcall_debug, bool, 0644);
58485
58486 static char msgbuf[64];
58487 -static struct boot_trace_call call;
58488 -static struct boot_trace_ret ret;
58489 +static struct boot_trace_call trace_call;
58490 +static struct boot_trace_ret trace_ret;
58491
58492 int do_one_initcall(initcall_t fn)
58493 {
58494 int count = preempt_count();
58495 ktime_t calltime, delta, rettime;
58496 + const char *msg1 = "", *msg2 = "";
58497
58498 if (initcall_debug) {
58499 - call.caller = task_pid_nr(current);
58500 - printk("calling %pF @ %i\n", fn, call.caller);
58501 + trace_call.caller = task_pid_nr(current);
58502 + printk("calling %pF @ %i\n", fn, trace_call.caller);
58503 calltime = ktime_get();
58504 - trace_boot_call(&call, fn);
58505 + trace_boot_call(&trace_call, fn);
58506 enable_boot_trace();
58507 }
58508
58509 - ret.result = fn();
58510 + trace_ret.result = fn();
58511
58512 if (initcall_debug) {
58513 disable_boot_trace();
58514 rettime = ktime_get();
58515 delta = ktime_sub(rettime, calltime);
58516 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
58517 - trace_boot_ret(&ret, fn);
58518 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
58519 + trace_boot_ret(&trace_ret, fn);
58520 printk("initcall %pF returned %d after %Ld usecs\n", fn,
58521 - ret.result, ret.duration);
58522 + trace_ret.result, trace_ret.duration);
58523 }
58524
58525 msgbuf[0] = 0;
58526
58527 - if (ret.result && ret.result != -ENODEV && initcall_debug)
58528 - sprintf(msgbuf, "error code %d ", ret.result);
58529 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
58530 + sprintf(msgbuf, "error code %d ", trace_ret.result);
58531
58532 if (preempt_count() != count) {
58533 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
58534 + msg1 = " preemption imbalance";
58535 preempt_count() = count;
58536 }
58537 if (irqs_disabled()) {
58538 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
58539 + msg2 = " disabled interrupts";
58540 local_irq_enable();
58541 }
58542 - if (msgbuf[0]) {
58543 - printk("initcall %pF returned with %s\n", fn, msgbuf);
58544 + if (msgbuf[0] || *msg1 || *msg2) {
58545 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
58546 }
58547
58548 - return ret.result;
58549 + return trace_ret.result;
58550 }
58551
58552
58553 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
58554 if (!ramdisk_execute_command)
58555 ramdisk_execute_command = "/init";
58556
58557 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
58558 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
58559 ramdisk_execute_command = NULL;
58560 prepare_namespace();
58561 }
58562
58563 + grsecurity_init();
58564 +
58565 /*
58566 * Ok, we have completed the initial bootup, and
58567 * we're essentially up and running. Get rid of the
58568 diff -urNp linux-2.6.32.42/init/noinitramfs.c linux-2.6.32.42/init/noinitramfs.c
58569 --- linux-2.6.32.42/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
58570 +++ linux-2.6.32.42/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
58571 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
58572 {
58573 int err;
58574
58575 - err = sys_mkdir("/dev", 0755);
58576 + err = sys_mkdir((const char __user *)"/dev", 0755);
58577 if (err < 0)
58578 goto out;
58579
58580 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
58581 if (err < 0)
58582 goto out;
58583
58584 - err = sys_mkdir("/root", 0700);
58585 + err = sys_mkdir((const char __user *)"/root", 0700);
58586 if (err < 0)
58587 goto out;
58588
58589 diff -urNp linux-2.6.32.42/ipc/mqueue.c linux-2.6.32.42/ipc/mqueue.c
58590 --- linux-2.6.32.42/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
58591 +++ linux-2.6.32.42/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
58592 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
58593 mq_bytes = (mq_msg_tblsz +
58594 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
58595
58596 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
58597 spin_lock(&mq_lock);
58598 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
58599 u->mq_bytes + mq_bytes >
58600 diff -urNp linux-2.6.32.42/ipc/sem.c linux-2.6.32.42/ipc/sem.c
58601 --- linux-2.6.32.42/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
58602 +++ linux-2.6.32.42/ipc/sem.c 2011-05-16 21:46:57.000000000 -0400
58603 @@ -671,6 +671,8 @@ static int semctl_main(struct ipc_namesp
58604 ushort* sem_io = fast_sem_io;
58605 int nsems;
58606
58607 + pax_track_stack();
58608 +
58609 sma = sem_lock_check(ns, semid);
58610 if (IS_ERR(sma))
58611 return PTR_ERR(sma);
58612 @@ -1071,6 +1073,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
58613 unsigned long jiffies_left = 0;
58614 struct ipc_namespace *ns;
58615
58616 + pax_track_stack();
58617 +
58618 ns = current->nsproxy->ipc_ns;
58619
58620 if (nsops < 1 || semid < 0)
58621 diff -urNp linux-2.6.32.42/ipc/shm.c linux-2.6.32.42/ipc/shm.c
58622 --- linux-2.6.32.42/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
58623 +++ linux-2.6.32.42/ipc/shm.c 2011-04-17 15:56:46.000000000 -0400
58624 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
58625 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
58626 #endif
58627
58628 +#ifdef CONFIG_GRKERNSEC
58629 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58630 + const time_t shm_createtime, const uid_t cuid,
58631 + const int shmid);
58632 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58633 + const time_t shm_createtime);
58634 +#endif
58635 +
58636 void shm_init_ns(struct ipc_namespace *ns)
58637 {
58638 ns->shm_ctlmax = SHMMAX;
58639 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
58640 shp->shm_lprid = 0;
58641 shp->shm_atim = shp->shm_dtim = 0;
58642 shp->shm_ctim = get_seconds();
58643 +#ifdef CONFIG_GRKERNSEC
58644 + {
58645 + struct timespec timeval;
58646 + do_posix_clock_monotonic_gettime(&timeval);
58647 +
58648 + shp->shm_createtime = timeval.tv_sec;
58649 + }
58650 +#endif
58651 shp->shm_segsz = size;
58652 shp->shm_nattch = 0;
58653 shp->shm_file = file;
58654 @@ -880,9 +896,21 @@ long do_shmat(int shmid, char __user *sh
58655 if (err)
58656 goto out_unlock;
58657
58658 +#ifdef CONFIG_GRKERNSEC
58659 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
58660 + shp->shm_perm.cuid, shmid) ||
58661 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
58662 + err = -EACCES;
58663 + goto out_unlock;
58664 + }
58665 +#endif
58666 +
58667 path.dentry = dget(shp->shm_file->f_path.dentry);
58668 path.mnt = shp->shm_file->f_path.mnt;
58669 shp->shm_nattch++;
58670 +#ifdef CONFIG_GRKERNSEC
58671 + shp->shm_lapid = current->pid;
58672 +#endif
58673 size = i_size_read(path.dentry->d_inode);
58674 shm_unlock(shp);
58675
58676 diff -urNp linux-2.6.32.42/kernel/acct.c linux-2.6.32.42/kernel/acct.c
58677 --- linux-2.6.32.42/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
58678 +++ linux-2.6.32.42/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
58679 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
58680 */
58681 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
58682 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
58683 - file->f_op->write(file, (char *)&ac,
58684 + file->f_op->write(file, (__force char __user *)&ac,
58685 sizeof(acct_t), &file->f_pos);
58686 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
58687 set_fs(fs);
58688 diff -urNp linux-2.6.32.42/kernel/audit.c linux-2.6.32.42/kernel/audit.c
58689 --- linux-2.6.32.42/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
58690 +++ linux-2.6.32.42/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
58691 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
58692 3) suppressed due to audit_rate_limit
58693 4) suppressed due to audit_backlog_limit
58694 */
58695 -static atomic_t audit_lost = ATOMIC_INIT(0);
58696 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
58697
58698 /* The netlink socket. */
58699 static struct sock *audit_sock;
58700 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
58701 unsigned long now;
58702 int print;
58703
58704 - atomic_inc(&audit_lost);
58705 + atomic_inc_unchecked(&audit_lost);
58706
58707 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
58708
58709 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
58710 printk(KERN_WARNING
58711 "audit: audit_lost=%d audit_rate_limit=%d "
58712 "audit_backlog_limit=%d\n",
58713 - atomic_read(&audit_lost),
58714 + atomic_read_unchecked(&audit_lost),
58715 audit_rate_limit,
58716 audit_backlog_limit);
58717 audit_panic(message);
58718 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
58719 status_set.pid = audit_pid;
58720 status_set.rate_limit = audit_rate_limit;
58721 status_set.backlog_limit = audit_backlog_limit;
58722 - status_set.lost = atomic_read(&audit_lost);
58723 + status_set.lost = atomic_read_unchecked(&audit_lost);
58724 status_set.backlog = skb_queue_len(&audit_skb_queue);
58725 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
58726 &status_set, sizeof(status_set));
58727 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
58728 spin_unlock_irq(&tsk->sighand->siglock);
58729 }
58730 read_unlock(&tasklist_lock);
58731 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
58732 - &s, sizeof(s));
58733 +
58734 + if (!err)
58735 + audit_send_reply(NETLINK_CB(skb).pid, seq,
58736 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
58737 break;
58738 }
58739 case AUDIT_TTY_SET: {
58740 diff -urNp linux-2.6.32.42/kernel/auditsc.c linux-2.6.32.42/kernel/auditsc.c
58741 --- linux-2.6.32.42/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
58742 +++ linux-2.6.32.42/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
58743 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
58744 }
58745
58746 /* global counter which is incremented every time something logs in */
58747 -static atomic_t session_id = ATOMIC_INIT(0);
58748 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
58749
58750 /**
58751 * audit_set_loginuid - set a task's audit_context loginuid
58752 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
58753 */
58754 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
58755 {
58756 - unsigned int sessionid = atomic_inc_return(&session_id);
58757 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
58758 struct audit_context *context = task->audit_context;
58759
58760 if (context && context->in_syscall) {
58761 diff -urNp linux-2.6.32.42/kernel/capability.c linux-2.6.32.42/kernel/capability.c
58762 --- linux-2.6.32.42/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
58763 +++ linux-2.6.32.42/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
58764 @@ -305,10 +305,26 @@ int capable(int cap)
58765 BUG();
58766 }
58767
58768 - if (security_capable(cap) == 0) {
58769 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
58770 current->flags |= PF_SUPERPRIV;
58771 return 1;
58772 }
58773 return 0;
58774 }
58775 +
58776 +int capable_nolog(int cap)
58777 +{
58778 + if (unlikely(!cap_valid(cap))) {
58779 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
58780 + BUG();
58781 + }
58782 +
58783 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
58784 + current->flags |= PF_SUPERPRIV;
58785 + return 1;
58786 + }
58787 + return 0;
58788 +}
58789 +
58790 EXPORT_SYMBOL(capable);
58791 +EXPORT_SYMBOL(capable_nolog);
58792 diff -urNp linux-2.6.32.42/kernel/cgroup.c linux-2.6.32.42/kernel/cgroup.c
58793 --- linux-2.6.32.42/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
58794 +++ linux-2.6.32.42/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
58795 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
58796 struct hlist_head *hhead;
58797 struct cg_cgroup_link *link;
58798
58799 + pax_track_stack();
58800 +
58801 /* First see if we already have a cgroup group that matches
58802 * the desired set */
58803 read_lock(&css_set_lock);
58804 diff -urNp linux-2.6.32.42/kernel/configs.c linux-2.6.32.42/kernel/configs.c
58805 --- linux-2.6.32.42/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
58806 +++ linux-2.6.32.42/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
58807 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
58808 struct proc_dir_entry *entry;
58809
58810 /* create the current config file */
58811 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
58812 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
58813 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
58814 + &ikconfig_file_ops);
58815 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58816 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
58817 + &ikconfig_file_ops);
58818 +#endif
58819 +#else
58820 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
58821 &ikconfig_file_ops);
58822 +#endif
58823 +
58824 if (!entry)
58825 return -ENOMEM;
58826
58827 diff -urNp linux-2.6.32.42/kernel/cpu.c linux-2.6.32.42/kernel/cpu.c
58828 --- linux-2.6.32.42/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
58829 +++ linux-2.6.32.42/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
58830 @@ -19,7 +19,7 @@
58831 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
58832 static DEFINE_MUTEX(cpu_add_remove_lock);
58833
58834 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
58835 +static RAW_NOTIFIER_HEAD(cpu_chain);
58836
58837 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
58838 * Should always be manipulated under cpu_add_remove_lock
58839 diff -urNp linux-2.6.32.42/kernel/cred.c linux-2.6.32.42/kernel/cred.c
58840 --- linux-2.6.32.42/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
58841 +++ linux-2.6.32.42/kernel/cred.c 2011-05-17 19:26:34.000000000 -0400
58842 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
58843 */
58844 void __put_cred(struct cred *cred)
58845 {
58846 + pax_track_stack();
58847 +
58848 kdebug("__put_cred(%p{%d,%d})", cred,
58849 atomic_read(&cred->usage),
58850 read_cred_subscribers(cred));
58851 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
58852 {
58853 struct cred *cred;
58854
58855 + pax_track_stack();
58856 +
58857 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
58858 atomic_read(&tsk->cred->usage),
58859 read_cred_subscribers(tsk->cred));
58860 @@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
58861 {
58862 const struct cred *cred;
58863
58864 + pax_track_stack();
58865 +
58866 rcu_read_lock();
58867
58868 do {
58869 @@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
58870 {
58871 struct cred *new;
58872
58873 + pax_track_stack();
58874 +
58875 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
58876 if (!new)
58877 return NULL;
58878 @@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
58879 const struct cred *old;
58880 struct cred *new;
58881
58882 + pax_track_stack();
58883 +
58884 validate_process_creds();
58885
58886 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
58887 @@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
58888 struct thread_group_cred *tgcred = NULL;
58889 struct cred *new;
58890
58891 + pax_track_stack();
58892 +
58893 #ifdef CONFIG_KEYS
58894 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
58895 if (!tgcred)
58896 @@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
58897 struct cred *new;
58898 int ret;
58899
58900 + pax_track_stack();
58901 +
58902 mutex_init(&p->cred_guard_mutex);
58903
58904 if (
58905 @@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
58906 struct task_struct *task = current;
58907 const struct cred *old = task->real_cred;
58908
58909 + pax_track_stack();
58910 +
58911 kdebug("commit_creds(%p{%d,%d})", new,
58912 atomic_read(&new->usage),
58913 read_cred_subscribers(new));
58914 @@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
58915
58916 get_cred(new); /* we will require a ref for the subj creds too */
58917
58918 + gr_set_role_label(task, new->uid, new->gid);
58919 +
58920 /* dumpability changes */
58921 if (old->euid != new->euid ||
58922 old->egid != new->egid ||
58923 @@ -606,6 +624,8 @@ EXPORT_SYMBOL(commit_creds);
58924 */
58925 void abort_creds(struct cred *new)
58926 {
58927 + pax_track_stack();
58928 +
58929 kdebug("abort_creds(%p{%d,%d})", new,
58930 atomic_read(&new->usage),
58931 read_cred_subscribers(new));
58932 @@ -629,6 +649,8 @@ const struct cred *override_creds(const
58933 {
58934 const struct cred *old = current->cred;
58935
58936 + pax_track_stack();
58937 +
58938 kdebug("override_creds(%p{%d,%d})", new,
58939 atomic_read(&new->usage),
58940 read_cred_subscribers(new));
58941 @@ -658,6 +680,8 @@ void revert_creds(const struct cred *old
58942 {
58943 const struct cred *override = current->cred;
58944
58945 + pax_track_stack();
58946 +
58947 kdebug("revert_creds(%p{%d,%d})", old,
58948 atomic_read(&old->usage),
58949 read_cred_subscribers(old));
58950 @@ -704,6 +728,8 @@ struct cred *prepare_kernel_cred(struct
58951 const struct cred *old;
58952 struct cred *new;
58953
58954 + pax_track_stack();
58955 +
58956 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
58957 if (!new)
58958 return NULL;
58959 @@ -758,6 +784,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
58960 */
58961 int set_security_override(struct cred *new, u32 secid)
58962 {
58963 + pax_track_stack();
58964 +
58965 return security_kernel_act_as(new, secid);
58966 }
58967 EXPORT_SYMBOL(set_security_override);
58968 @@ -777,6 +805,8 @@ int set_security_override_from_ctx(struc
58969 u32 secid;
58970 int ret;
58971
58972 + pax_track_stack();
58973 +
58974 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
58975 if (ret < 0)
58976 return ret;
58977 diff -urNp linux-2.6.32.42/kernel/exit.c linux-2.6.32.42/kernel/exit.c
58978 --- linux-2.6.32.42/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
58979 +++ linux-2.6.32.42/kernel/exit.c 2011-04-17 15:56:46.000000000 -0400
58980 @@ -55,6 +55,10 @@
58981 #include <asm/pgtable.h>
58982 #include <asm/mmu_context.h>
58983
58984 +#ifdef CONFIG_GRKERNSEC
58985 +extern rwlock_t grsec_exec_file_lock;
58986 +#endif
58987 +
58988 static void exit_mm(struct task_struct * tsk);
58989
58990 static void __unhash_process(struct task_struct *p)
58991 @@ -174,6 +178,8 @@ void release_task(struct task_struct * p
58992 struct task_struct *leader;
58993 int zap_leader;
58994 repeat:
58995 + gr_del_task_from_ip_table(p);
58996 +
58997 tracehook_prepare_release_task(p);
58998 /* don't need to get the RCU readlock here - the process is dead and
58999 * can't be modifying its own credentials */
59000 @@ -341,11 +347,22 @@ static void reparent_to_kthreadd(void)
59001 {
59002 write_lock_irq(&tasklist_lock);
59003
59004 +#ifdef CONFIG_GRKERNSEC
59005 + write_lock(&grsec_exec_file_lock);
59006 + if (current->exec_file) {
59007 + fput(current->exec_file);
59008 + current->exec_file = NULL;
59009 + }
59010 + write_unlock(&grsec_exec_file_lock);
59011 +#endif
59012 +
59013 ptrace_unlink(current);
59014 /* Reparent to init */
59015 current->real_parent = current->parent = kthreadd_task;
59016 list_move_tail(&current->sibling, &current->real_parent->children);
59017
59018 + gr_set_kernel_label(current);
59019 +
59020 /* Set the exit signal to SIGCHLD so we signal init on exit */
59021 current->exit_signal = SIGCHLD;
59022
59023 @@ -397,7 +414,7 @@ int allow_signal(int sig)
59024 * know it'll be handled, so that they don't get converted to
59025 * SIGKILL or just silently dropped.
59026 */
59027 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
59028 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
59029 recalc_sigpending();
59030 spin_unlock_irq(&current->sighand->siglock);
59031 return 0;
59032 @@ -433,6 +450,17 @@ void daemonize(const char *name, ...)
59033 vsnprintf(current->comm, sizeof(current->comm), name, args);
59034 va_end(args);
59035
59036 +#ifdef CONFIG_GRKERNSEC
59037 + write_lock(&grsec_exec_file_lock);
59038 + if (current->exec_file) {
59039 + fput(current->exec_file);
59040 + current->exec_file = NULL;
59041 + }
59042 + write_unlock(&grsec_exec_file_lock);
59043 +#endif
59044 +
59045 + gr_set_kernel_label(current);
59046 +
59047 /*
59048 * If we were started as result of loading a module, close all of the
59049 * user space pages. We don't need them, and if we didn't close them
59050 @@ -897,17 +925,17 @@ NORET_TYPE void do_exit(long code)
59051 struct task_struct *tsk = current;
59052 int group_dead;
59053
59054 - profile_task_exit(tsk);
59055 -
59056 - WARN_ON(atomic_read(&tsk->fs_excl));
59057 -
59058 + /*
59059 + * Check this first since set_fs() below depends on
59060 + * current_thread_info(), which we better not access when we're in
59061 + * interrupt context. Other than that, we want to do the set_fs()
59062 + * as early as possible.
59063 + */
59064 if (unlikely(in_interrupt()))
59065 panic("Aiee, killing interrupt handler!");
59066 - if (unlikely(!tsk->pid))
59067 - panic("Attempted to kill the idle task!");
59068
59069 /*
59070 - * If do_exit is called because this processes oopsed, it's possible
59071 + * If do_exit is called because this processes Oops'ed, it's possible
59072 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
59073 * continuing. Amongst other possible reasons, this is to prevent
59074 * mm_release()->clear_child_tid() from writing to a user-controlled
59075 @@ -915,6 +943,13 @@ NORET_TYPE void do_exit(long code)
59076 */
59077 set_fs(USER_DS);
59078
59079 + profile_task_exit(tsk);
59080 +
59081 + WARN_ON(atomic_read(&tsk->fs_excl));
59082 +
59083 + if (unlikely(!tsk->pid))
59084 + panic("Attempted to kill the idle task!");
59085 +
59086 tracehook_report_exit(&code);
59087
59088 validate_creds_for_do_exit(tsk);
59089 @@ -973,6 +1008,9 @@ NORET_TYPE void do_exit(long code)
59090 tsk->exit_code = code;
59091 taskstats_exit(tsk, group_dead);
59092
59093 + gr_acl_handle_psacct(tsk, code);
59094 + gr_acl_handle_exit();
59095 +
59096 exit_mm(tsk);
59097
59098 if (group_dead)
59099 @@ -1188,7 +1226,7 @@ static int wait_task_zombie(struct wait_
59100
59101 if (unlikely(wo->wo_flags & WNOWAIT)) {
59102 int exit_code = p->exit_code;
59103 - int why, status;
59104 + int why;
59105
59106 get_task_struct(p);
59107 read_unlock(&tasklist_lock);
59108 diff -urNp linux-2.6.32.42/kernel/fork.c linux-2.6.32.42/kernel/fork.c
59109 --- linux-2.6.32.42/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
59110 +++ linux-2.6.32.42/kernel/fork.c 2011-04-17 15:56:46.000000000 -0400
59111 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
59112 *stackend = STACK_END_MAGIC; /* for overflow detection */
59113
59114 #ifdef CONFIG_CC_STACKPROTECTOR
59115 - tsk->stack_canary = get_random_int();
59116 + tsk->stack_canary = pax_get_random_long();
59117 #endif
59118
59119 /* One for us, one for whoever does the "release_task()" (usually parent) */
59120 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
59121 mm->locked_vm = 0;
59122 mm->mmap = NULL;
59123 mm->mmap_cache = NULL;
59124 - mm->free_area_cache = oldmm->mmap_base;
59125 - mm->cached_hole_size = ~0UL;
59126 + mm->free_area_cache = oldmm->free_area_cache;
59127 + mm->cached_hole_size = oldmm->cached_hole_size;
59128 mm->map_count = 0;
59129 cpumask_clear(mm_cpumask(mm));
59130 mm->mm_rb = RB_ROOT;
59131 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
59132 tmp->vm_flags &= ~VM_LOCKED;
59133 tmp->vm_mm = mm;
59134 tmp->vm_next = tmp->vm_prev = NULL;
59135 + tmp->vm_mirror = NULL;
59136 anon_vma_link(tmp);
59137 file = tmp->vm_file;
59138 if (file) {
59139 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
59140 if (retval)
59141 goto out;
59142 }
59143 +
59144 +#ifdef CONFIG_PAX_SEGMEXEC
59145 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
59146 + struct vm_area_struct *mpnt_m;
59147 +
59148 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
59149 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
59150 +
59151 + if (!mpnt->vm_mirror)
59152 + continue;
59153 +
59154 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
59155 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
59156 + mpnt->vm_mirror = mpnt_m;
59157 + } else {
59158 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
59159 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
59160 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
59161 + mpnt->vm_mirror->vm_mirror = mpnt;
59162 + }
59163 + }
59164 + BUG_ON(mpnt_m);
59165 + }
59166 +#endif
59167 +
59168 /* a new mm has just been created */
59169 arch_dup_mmap(oldmm, mm);
59170 retval = 0;
59171 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
59172 write_unlock(&fs->lock);
59173 return -EAGAIN;
59174 }
59175 - fs->users++;
59176 + atomic_inc(&fs->users);
59177 write_unlock(&fs->lock);
59178 return 0;
59179 }
59180 tsk->fs = copy_fs_struct(fs);
59181 if (!tsk->fs)
59182 return -ENOMEM;
59183 + gr_set_chroot_entries(tsk, &tsk->fs->root);
59184 return 0;
59185 }
59186
59187 @@ -1033,10 +1060,13 @@ static struct task_struct *copy_process(
59188 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
59189 #endif
59190 retval = -EAGAIN;
59191 +
59192 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
59193 +
59194 if (atomic_read(&p->real_cred->user->processes) >=
59195 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
59196 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
59197 - p->real_cred->user != INIT_USER)
59198 + if (p->real_cred->user != INIT_USER &&
59199 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
59200 goto bad_fork_free;
59201 }
59202
59203 @@ -1183,6 +1213,8 @@ static struct task_struct *copy_process(
59204 goto bad_fork_free_pid;
59205 }
59206
59207 + gr_copy_label(p);
59208 +
59209 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
59210 /*
59211 * Clear TID on mm_release()?
59212 @@ -1333,6 +1365,8 @@ bad_fork_cleanup_count:
59213 bad_fork_free:
59214 free_task(p);
59215 fork_out:
59216 + gr_log_forkfail(retval);
59217 +
59218 return ERR_PTR(retval);
59219 }
59220
59221 @@ -1426,6 +1460,8 @@ long do_fork(unsigned long clone_flags,
59222 if (clone_flags & CLONE_PARENT_SETTID)
59223 put_user(nr, parent_tidptr);
59224
59225 + gr_handle_brute_check();
59226 +
59227 if (clone_flags & CLONE_VFORK) {
59228 p->vfork_done = &vfork;
59229 init_completion(&vfork);
59230 @@ -1558,7 +1594,7 @@ static int unshare_fs(unsigned long unsh
59231 return 0;
59232
59233 /* don't need lock here; in the worst case we'll do useless copy */
59234 - if (fs->users == 1)
59235 + if (atomic_read(&fs->users) == 1)
59236 return 0;
59237
59238 *new_fsp = copy_fs_struct(fs);
59239 @@ -1681,7 +1717,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
59240 fs = current->fs;
59241 write_lock(&fs->lock);
59242 current->fs = new_fs;
59243 - if (--fs->users)
59244 + gr_set_chroot_entries(current, &current->fs->root);
59245 + if (atomic_dec_return(&fs->users))
59246 new_fs = NULL;
59247 else
59248 new_fs = fs;
59249 diff -urNp linux-2.6.32.42/kernel/futex.c linux-2.6.32.42/kernel/futex.c
59250 --- linux-2.6.32.42/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
59251 +++ linux-2.6.32.42/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
59252 @@ -54,6 +54,7 @@
59253 #include <linux/mount.h>
59254 #include <linux/pagemap.h>
59255 #include <linux/syscalls.h>
59256 +#include <linux/ptrace.h>
59257 #include <linux/signal.h>
59258 #include <linux/module.h>
59259 #include <linux/magic.h>
59260 @@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
59261 struct page *page;
59262 int err;
59263
59264 +#ifdef CONFIG_PAX_SEGMEXEC
59265 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
59266 + return -EFAULT;
59267 +#endif
59268 +
59269 /*
59270 * The futex address must be "naturally" aligned.
59271 */
59272 @@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
59273 struct futex_q q;
59274 int ret;
59275
59276 + pax_track_stack();
59277 +
59278 if (!bitset)
59279 return -EINVAL;
59280
59281 @@ -1841,7 +1849,7 @@ retry:
59282
59283 restart = &current_thread_info()->restart_block;
59284 restart->fn = futex_wait_restart;
59285 - restart->futex.uaddr = (u32 *)uaddr;
59286 + restart->futex.uaddr = uaddr;
59287 restart->futex.val = val;
59288 restart->futex.time = abs_time->tv64;
59289 restart->futex.bitset = bitset;
59290 @@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
59291 struct futex_q q;
59292 int res, ret;
59293
59294 + pax_track_stack();
59295 +
59296 if (!bitset)
59297 return -EINVAL;
59298
59299 @@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59300 {
59301 struct robust_list_head __user *head;
59302 unsigned long ret;
59303 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59304 const struct cred *cred = current_cred(), *pcred;
59305 +#endif
59306
59307 if (!futex_cmpxchg_enabled)
59308 return -ENOSYS;
59309 @@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59310 if (!p)
59311 goto err_unlock;
59312 ret = -EPERM;
59313 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59314 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
59315 + goto err_unlock;
59316 +#else
59317 pcred = __task_cred(p);
59318 if (cred->euid != pcred->euid &&
59319 cred->euid != pcred->uid &&
59320 !capable(CAP_SYS_PTRACE))
59321 goto err_unlock;
59322 +#endif
59323 head = p->robust_list;
59324 rcu_read_unlock();
59325 }
59326 @@ -2459,7 +2476,7 @@ retry:
59327 */
59328 static inline int fetch_robust_entry(struct robust_list __user **entry,
59329 struct robust_list __user * __user *head,
59330 - int *pi)
59331 + unsigned int *pi)
59332 {
59333 unsigned long uentry;
59334
59335 @@ -2640,6 +2657,7 @@ static int __init futex_init(void)
59336 {
59337 u32 curval;
59338 int i;
59339 + mm_segment_t oldfs;
59340
59341 /*
59342 * This will fail and we want it. Some arch implementations do
59343 @@ -2651,7 +2669,10 @@ static int __init futex_init(void)
59344 * implementation, the non functional ones will return
59345 * -ENOSYS.
59346 */
59347 + oldfs = get_fs();
59348 + set_fs(USER_DS);
59349 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
59350 + set_fs(oldfs);
59351 if (curval == -EFAULT)
59352 futex_cmpxchg_enabled = 1;
59353
59354 diff -urNp linux-2.6.32.42/kernel/futex_compat.c linux-2.6.32.42/kernel/futex_compat.c
59355 --- linux-2.6.32.42/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
59356 +++ linux-2.6.32.42/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
59357 @@ -10,6 +10,7 @@
59358 #include <linux/compat.h>
59359 #include <linux/nsproxy.h>
59360 #include <linux/futex.h>
59361 +#include <linux/ptrace.h>
59362
59363 #include <asm/uaccess.h>
59364
59365 @@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
59366 {
59367 struct compat_robust_list_head __user *head;
59368 unsigned long ret;
59369 - const struct cred *cred = current_cred(), *pcred;
59370 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59371 + const struct cred *cred = current_cred();
59372 + const struct cred *pcred;
59373 +#endif
59374
59375 if (!futex_cmpxchg_enabled)
59376 return -ENOSYS;
59377 @@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
59378 if (!p)
59379 goto err_unlock;
59380 ret = -EPERM;
59381 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59382 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
59383 + goto err_unlock;
59384 +#else
59385 pcred = __task_cred(p);
59386 if (cred->euid != pcred->euid &&
59387 cred->euid != pcred->uid &&
59388 !capable(CAP_SYS_PTRACE))
59389 goto err_unlock;
59390 +#endif
59391 head = p->compat_robust_list;
59392 read_unlock(&tasklist_lock);
59393 }
59394 diff -urNp linux-2.6.32.42/kernel/gcov/base.c linux-2.6.32.42/kernel/gcov/base.c
59395 --- linux-2.6.32.42/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
59396 +++ linux-2.6.32.42/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
59397 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
59398 }
59399
59400 #ifdef CONFIG_MODULES
59401 -static inline int within(void *addr, void *start, unsigned long size)
59402 -{
59403 - return ((addr >= start) && (addr < start + size));
59404 -}
59405 -
59406 /* Update list and generate events when modules are unloaded. */
59407 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
59408 void *data)
59409 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
59410 prev = NULL;
59411 /* Remove entries located in module from linked list. */
59412 for (info = gcov_info_head; info; info = info->next) {
59413 - if (within(info, mod->module_core, mod->core_size)) {
59414 + if (within_module_core_rw((unsigned long)info, mod)) {
59415 if (prev)
59416 prev->next = info->next;
59417 else
59418 diff -urNp linux-2.6.32.42/kernel/hrtimer.c linux-2.6.32.42/kernel/hrtimer.c
59419 --- linux-2.6.32.42/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
59420 +++ linux-2.6.32.42/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
59421 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
59422 local_irq_restore(flags);
59423 }
59424
59425 -static void run_hrtimer_softirq(struct softirq_action *h)
59426 +static void run_hrtimer_softirq(void)
59427 {
59428 hrtimer_peek_ahead_timers();
59429 }
59430 diff -urNp linux-2.6.32.42/kernel/kallsyms.c linux-2.6.32.42/kernel/kallsyms.c
59431 --- linux-2.6.32.42/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
59432 +++ linux-2.6.32.42/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
59433 @@ -11,6 +11,9 @@
59434 * Changed the compression method from stem compression to "table lookup"
59435 * compression (see scripts/kallsyms.c for a more complete description)
59436 */
59437 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59438 +#define __INCLUDED_BY_HIDESYM 1
59439 +#endif
59440 #include <linux/kallsyms.h>
59441 #include <linux/module.h>
59442 #include <linux/init.h>
59443 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
59444
59445 static inline int is_kernel_inittext(unsigned long addr)
59446 {
59447 + if (system_state != SYSTEM_BOOTING)
59448 + return 0;
59449 +
59450 if (addr >= (unsigned long)_sinittext
59451 && addr <= (unsigned long)_einittext)
59452 return 1;
59453 return 0;
59454 }
59455
59456 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59457 +#ifdef CONFIG_MODULES
59458 +static inline int is_module_text(unsigned long addr)
59459 +{
59460 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
59461 + return 1;
59462 +
59463 + addr = ktla_ktva(addr);
59464 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
59465 +}
59466 +#else
59467 +static inline int is_module_text(unsigned long addr)
59468 +{
59469 + return 0;
59470 +}
59471 +#endif
59472 +#endif
59473 +
59474 static inline int is_kernel_text(unsigned long addr)
59475 {
59476 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
59477 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
59478
59479 static inline int is_kernel(unsigned long addr)
59480 {
59481 +
59482 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59483 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
59484 + return 1;
59485 +
59486 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
59487 +#else
59488 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
59489 +#endif
59490 +
59491 return 1;
59492 return in_gate_area_no_task(addr);
59493 }
59494
59495 static int is_ksym_addr(unsigned long addr)
59496 {
59497 +
59498 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59499 + if (is_module_text(addr))
59500 + return 0;
59501 +#endif
59502 +
59503 if (all_var)
59504 return is_kernel(addr);
59505
59506 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
59507
59508 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
59509 {
59510 - iter->name[0] = '\0';
59511 iter->nameoff = get_symbol_offset(new_pos);
59512 iter->pos = new_pos;
59513 }
59514 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
59515 {
59516 struct kallsym_iter *iter = m->private;
59517
59518 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59519 + if (current_uid())
59520 + return 0;
59521 +#endif
59522 +
59523 /* Some debugging symbols have no name. Ignore them. */
59524 if (!iter->name[0])
59525 return 0;
59526 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
59527 struct kallsym_iter *iter;
59528 int ret;
59529
59530 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
59531 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
59532 if (!iter)
59533 return -ENOMEM;
59534 reset_iter(iter, 0);
59535 diff -urNp linux-2.6.32.42/kernel/kgdb.c linux-2.6.32.42/kernel/kgdb.c
59536 --- linux-2.6.32.42/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
59537 +++ linux-2.6.32.42/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
59538 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
59539 /* Guard for recursive entry */
59540 static int exception_level;
59541
59542 -static struct kgdb_io *kgdb_io_ops;
59543 +static const struct kgdb_io *kgdb_io_ops;
59544 static DEFINE_SPINLOCK(kgdb_registration_lock);
59545
59546 /* kgdb console driver is loaded */
59547 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
59548 */
59549 static atomic_t passive_cpu_wait[NR_CPUS];
59550 static atomic_t cpu_in_kgdb[NR_CPUS];
59551 -atomic_t kgdb_setting_breakpoint;
59552 +atomic_unchecked_t kgdb_setting_breakpoint;
59553
59554 struct task_struct *kgdb_usethread;
59555 struct task_struct *kgdb_contthread;
59556 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
59557 sizeof(unsigned long)];
59558
59559 /* to keep track of the CPU which is doing the single stepping*/
59560 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59561 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59562
59563 /*
59564 * If you are debugging a problem where roundup (the collection of
59565 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
59566 return 0;
59567 if (kgdb_connected)
59568 return 1;
59569 - if (atomic_read(&kgdb_setting_breakpoint))
59570 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
59571 return 1;
59572 if (print_wait)
59573 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
59574 @@ -1426,8 +1426,8 @@ acquirelock:
59575 * instance of the exception handler wanted to come into the
59576 * debugger on a different CPU via a single step
59577 */
59578 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
59579 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
59580 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
59581 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
59582
59583 atomic_set(&kgdb_active, -1);
59584 touch_softlockup_watchdog();
59585 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
59586 *
59587 * Register it with the KGDB core.
59588 */
59589 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
59590 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
59591 {
59592 int err;
59593
59594 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
59595 *
59596 * Unregister it with the KGDB core.
59597 */
59598 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
59599 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
59600 {
59601 BUG_ON(kgdb_connected);
59602
59603 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
59604 */
59605 void kgdb_breakpoint(void)
59606 {
59607 - atomic_set(&kgdb_setting_breakpoint, 1);
59608 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
59609 wmb(); /* Sync point before breakpoint */
59610 arch_kgdb_breakpoint();
59611 wmb(); /* Sync point after breakpoint */
59612 - atomic_set(&kgdb_setting_breakpoint, 0);
59613 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
59614 }
59615 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
59616
59617 diff -urNp linux-2.6.32.42/kernel/kmod.c linux-2.6.32.42/kernel/kmod.c
59618 --- linux-2.6.32.42/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
59619 +++ linux-2.6.32.42/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
59620 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
59621 * If module auto-loading support is disabled then this function
59622 * becomes a no-operation.
59623 */
59624 -int __request_module(bool wait, const char *fmt, ...)
59625 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
59626 {
59627 - va_list args;
59628 char module_name[MODULE_NAME_LEN];
59629 unsigned int max_modprobes;
59630 int ret;
59631 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
59632 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
59633 static char *envp[] = { "HOME=/",
59634 "TERM=linux",
59635 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
59636 @@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
59637 if (ret)
59638 return ret;
59639
59640 - va_start(args, fmt);
59641 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
59642 - va_end(args);
59643 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
59644 if (ret >= MODULE_NAME_LEN)
59645 return -ENAMETOOLONG;
59646
59647 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59648 + if (!current_uid()) {
59649 + /* hack to workaround consolekit/udisks stupidity */
59650 + read_lock(&tasklist_lock);
59651 + if (!strcmp(current->comm, "mount") &&
59652 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
59653 + read_unlock(&tasklist_lock);
59654 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
59655 + return -EPERM;
59656 + }
59657 + read_unlock(&tasklist_lock);
59658 + }
59659 +#endif
59660 +
59661 /* If modprobe needs a service that is in a module, we get a recursive
59662 * loop. Limit the number of running kmod threads to max_threads/2 or
59663 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
59664 @@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
59665 atomic_dec(&kmod_concurrent);
59666 return ret;
59667 }
59668 +
59669 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
59670 +{
59671 + va_list args;
59672 + int ret;
59673 +
59674 + va_start(args, fmt);
59675 + ret = ____request_module(wait, module_param, fmt, args);
59676 + va_end(args);
59677 +
59678 + return ret;
59679 +}
59680 +
59681 +int __request_module(bool wait, const char *fmt, ...)
59682 +{
59683 + va_list args;
59684 + int ret;
59685 +
59686 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59687 + if (current_uid()) {
59688 + char module_param[MODULE_NAME_LEN];
59689 +
59690 + memset(module_param, 0, sizeof(module_param));
59691 +
59692 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
59693 +
59694 + va_start(args, fmt);
59695 + ret = ____request_module(wait, module_param, fmt, args);
59696 + va_end(args);
59697 +
59698 + return ret;
59699 + }
59700 +#endif
59701 +
59702 + va_start(args, fmt);
59703 + ret = ____request_module(wait, NULL, fmt, args);
59704 + va_end(args);
59705 +
59706 + return ret;
59707 +}
59708 +
59709 +
59710 EXPORT_SYMBOL(__request_module);
59711 #endif /* CONFIG_MODULES */
59712
59713 diff -urNp linux-2.6.32.42/kernel/kprobes.c linux-2.6.32.42/kernel/kprobes.c
59714 --- linux-2.6.32.42/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
59715 +++ linux-2.6.32.42/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
59716 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
59717 * kernel image and loaded module images reside. This is required
59718 * so x86_64 can correctly handle the %rip-relative fixups.
59719 */
59720 - kip->insns = module_alloc(PAGE_SIZE);
59721 + kip->insns = module_alloc_exec(PAGE_SIZE);
59722 if (!kip->insns) {
59723 kfree(kip);
59724 return NULL;
59725 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
59726 */
59727 if (!list_is_singular(&kprobe_insn_pages)) {
59728 list_del(&kip->list);
59729 - module_free(NULL, kip->insns);
59730 + module_free_exec(NULL, kip->insns);
59731 kfree(kip);
59732 }
59733 return 1;
59734 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
59735 {
59736 int i, err = 0;
59737 unsigned long offset = 0, size = 0;
59738 - char *modname, namebuf[128];
59739 + char *modname, namebuf[KSYM_NAME_LEN];
59740 const char *symbol_name;
59741 void *addr;
59742 struct kprobe_blackpoint *kb;
59743 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
59744 const char *sym = NULL;
59745 unsigned int i = *(loff_t *) v;
59746 unsigned long offset = 0;
59747 - char *modname, namebuf[128];
59748 + char *modname, namebuf[KSYM_NAME_LEN];
59749
59750 head = &kprobe_table[i];
59751 preempt_disable();
59752 diff -urNp linux-2.6.32.42/kernel/lockdep.c linux-2.6.32.42/kernel/lockdep.c
59753 --- linux-2.6.32.42/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
59754 +++ linux-2.6.32.42/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
59755 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
59756 /*
59757 * Various lockdep statistics:
59758 */
59759 -atomic_t chain_lookup_hits;
59760 -atomic_t chain_lookup_misses;
59761 -atomic_t hardirqs_on_events;
59762 -atomic_t hardirqs_off_events;
59763 -atomic_t redundant_hardirqs_on;
59764 -atomic_t redundant_hardirqs_off;
59765 -atomic_t softirqs_on_events;
59766 -atomic_t softirqs_off_events;
59767 -atomic_t redundant_softirqs_on;
59768 -atomic_t redundant_softirqs_off;
59769 -atomic_t nr_unused_locks;
59770 -atomic_t nr_cyclic_checks;
59771 -atomic_t nr_find_usage_forwards_checks;
59772 -atomic_t nr_find_usage_backwards_checks;
59773 +atomic_unchecked_t chain_lookup_hits;
59774 +atomic_unchecked_t chain_lookup_misses;
59775 +atomic_unchecked_t hardirqs_on_events;
59776 +atomic_unchecked_t hardirqs_off_events;
59777 +atomic_unchecked_t redundant_hardirqs_on;
59778 +atomic_unchecked_t redundant_hardirqs_off;
59779 +atomic_unchecked_t softirqs_on_events;
59780 +atomic_unchecked_t softirqs_off_events;
59781 +atomic_unchecked_t redundant_softirqs_on;
59782 +atomic_unchecked_t redundant_softirqs_off;
59783 +atomic_unchecked_t nr_unused_locks;
59784 +atomic_unchecked_t nr_cyclic_checks;
59785 +atomic_unchecked_t nr_find_usage_forwards_checks;
59786 +atomic_unchecked_t nr_find_usage_backwards_checks;
59787 #endif
59788
59789 /*
59790 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
59791 int i;
59792 #endif
59793
59794 +#ifdef CONFIG_PAX_KERNEXEC
59795 + start = ktla_ktva(start);
59796 +#endif
59797 +
59798 /*
59799 * static variable?
59800 */
59801 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
59802 */
59803 for_each_possible_cpu(i) {
59804 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
59805 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
59806 - + per_cpu_offset(i);
59807 + end = start + PERCPU_ENOUGH_ROOM;
59808
59809 if ((addr >= start) && (addr < end))
59810 return 1;
59811 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
59812 if (!static_obj(lock->key)) {
59813 debug_locks_off();
59814 printk("INFO: trying to register non-static key.\n");
59815 + printk("lock:%pS key:%pS.\n", lock, lock->key);
59816 printk("the code is fine but needs lockdep annotation.\n");
59817 printk("turning off the locking correctness validator.\n");
59818 dump_stack();
59819 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
59820 if (!class)
59821 return 0;
59822 }
59823 - debug_atomic_inc((atomic_t *)&class->ops);
59824 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
59825 if (very_verbose(class)) {
59826 printk("\nacquire class [%p] %s", class->key, class->name);
59827 if (class->name_version > 1)
59828 diff -urNp linux-2.6.32.42/kernel/lockdep_internals.h linux-2.6.32.42/kernel/lockdep_internals.h
59829 --- linux-2.6.32.42/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
59830 +++ linux-2.6.32.42/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
59831 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
59832 /*
59833 * Various lockdep statistics:
59834 */
59835 -extern atomic_t chain_lookup_hits;
59836 -extern atomic_t chain_lookup_misses;
59837 -extern atomic_t hardirqs_on_events;
59838 -extern atomic_t hardirqs_off_events;
59839 -extern atomic_t redundant_hardirqs_on;
59840 -extern atomic_t redundant_hardirqs_off;
59841 -extern atomic_t softirqs_on_events;
59842 -extern atomic_t softirqs_off_events;
59843 -extern atomic_t redundant_softirqs_on;
59844 -extern atomic_t redundant_softirqs_off;
59845 -extern atomic_t nr_unused_locks;
59846 -extern atomic_t nr_cyclic_checks;
59847 -extern atomic_t nr_cyclic_check_recursions;
59848 -extern atomic_t nr_find_usage_forwards_checks;
59849 -extern atomic_t nr_find_usage_forwards_recursions;
59850 -extern atomic_t nr_find_usage_backwards_checks;
59851 -extern atomic_t nr_find_usage_backwards_recursions;
59852 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
59853 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
59854 -# define debug_atomic_read(ptr) atomic_read(ptr)
59855 +extern atomic_unchecked_t chain_lookup_hits;
59856 +extern atomic_unchecked_t chain_lookup_misses;
59857 +extern atomic_unchecked_t hardirqs_on_events;
59858 +extern atomic_unchecked_t hardirqs_off_events;
59859 +extern atomic_unchecked_t redundant_hardirqs_on;
59860 +extern atomic_unchecked_t redundant_hardirqs_off;
59861 +extern atomic_unchecked_t softirqs_on_events;
59862 +extern atomic_unchecked_t softirqs_off_events;
59863 +extern atomic_unchecked_t redundant_softirqs_on;
59864 +extern atomic_unchecked_t redundant_softirqs_off;
59865 +extern atomic_unchecked_t nr_unused_locks;
59866 +extern atomic_unchecked_t nr_cyclic_checks;
59867 +extern atomic_unchecked_t nr_cyclic_check_recursions;
59868 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
59869 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
59870 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
59871 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
59872 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
59873 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
59874 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
59875 #else
59876 # define debug_atomic_inc(ptr) do { } while (0)
59877 # define debug_atomic_dec(ptr) do { } while (0)
59878 diff -urNp linux-2.6.32.42/kernel/lockdep_proc.c linux-2.6.32.42/kernel/lockdep_proc.c
59879 --- linux-2.6.32.42/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
59880 +++ linux-2.6.32.42/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
59881 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
59882
59883 static void print_name(struct seq_file *m, struct lock_class *class)
59884 {
59885 - char str[128];
59886 + char str[KSYM_NAME_LEN];
59887 const char *name = class->name;
59888
59889 if (!name) {
59890 diff -urNp linux-2.6.32.42/kernel/module.c linux-2.6.32.42/kernel/module.c
59891 --- linux-2.6.32.42/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
59892 +++ linux-2.6.32.42/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
59893 @@ -55,6 +55,7 @@
59894 #include <linux/async.h>
59895 #include <linux/percpu.h>
59896 #include <linux/kmemleak.h>
59897 +#include <linux/grsecurity.h>
59898
59899 #define CREATE_TRACE_POINTS
59900 #include <trace/events/module.h>
59901 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
59902 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
59903
59904 /* Bounds of module allocation, for speeding __module_address */
59905 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
59906 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
59907 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
59908
59909 int register_module_notifier(struct notifier_block * nb)
59910 {
59911 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
59912 return true;
59913
59914 list_for_each_entry_rcu(mod, &modules, list) {
59915 - struct symsearch arr[] = {
59916 + struct symsearch modarr[] = {
59917 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
59918 NOT_GPL_ONLY, false },
59919 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
59920 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
59921 #endif
59922 };
59923
59924 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
59925 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
59926 return true;
59927 }
59928 return false;
59929 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
59930 void *ptr;
59931 int cpu;
59932
59933 - if (align > PAGE_SIZE) {
59934 + if (align-1 >= PAGE_SIZE) {
59935 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
59936 name, align, PAGE_SIZE);
59937 align = PAGE_SIZE;
59938 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
59939 * /sys/module/foo/sections stuff
59940 * J. Corbet <corbet@lwn.net>
59941 */
59942 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
59943 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59944
59945 static inline bool sect_empty(const Elf_Shdr *sect)
59946 {
59947 @@ -1545,7 +1547,8 @@ static void free_module(struct module *m
59948 destroy_params(mod->kp, mod->num_kp);
59949
59950 /* This may be NULL, but that's OK */
59951 - module_free(mod, mod->module_init);
59952 + module_free(mod, mod->module_init_rw);
59953 + module_free_exec(mod, mod->module_init_rx);
59954 kfree(mod->args);
59955 if (mod->percpu)
59956 percpu_modfree(mod->percpu);
59957 @@ -1554,10 +1557,12 @@ static void free_module(struct module *m
59958 percpu_modfree(mod->refptr);
59959 #endif
59960 /* Free lock-classes: */
59961 - lockdep_free_key_range(mod->module_core, mod->core_size);
59962 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
59963 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
59964
59965 /* Finally, free the core (containing the module structure) */
59966 - module_free(mod, mod->module_core);
59967 + module_free_exec(mod, mod->module_core_rx);
59968 + module_free(mod, mod->module_core_rw);
59969
59970 #ifdef CONFIG_MPU
59971 update_protections(current->mm);
59972 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
59973 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
59974 int ret = 0;
59975 const struct kernel_symbol *ksym;
59976 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59977 + int is_fs_load = 0;
59978 + int register_filesystem_found = 0;
59979 + char *p;
59980 +
59981 + p = strstr(mod->args, "grsec_modharden_fs");
59982 +
59983 + if (p) {
59984 + char *endptr = p + strlen("grsec_modharden_fs");
59985 + /* copy \0 as well */
59986 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
59987 + is_fs_load = 1;
59988 + }
59989 +#endif
59990 +
59991
59992 for (i = 1; i < n; i++) {
59993 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59994 + const char *name = strtab + sym[i].st_name;
59995 +
59996 + /* it's a real shame this will never get ripped and copied
59997 + upstream! ;(
59998 + */
59999 + if (is_fs_load && !strcmp(name, "register_filesystem"))
60000 + register_filesystem_found = 1;
60001 +#endif
60002 switch (sym[i].st_shndx) {
60003 case SHN_COMMON:
60004 /* We compiled with -fno-common. These are not
60005 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
60006 strtab + sym[i].st_name, mod);
60007 /* Ok if resolved. */
60008 if (ksym) {
60009 + pax_open_kernel();
60010 sym[i].st_value = ksym->value;
60011 + pax_close_kernel();
60012 break;
60013 }
60014
60015 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
60016 secbase = (unsigned long)mod->percpu;
60017 else
60018 secbase = sechdrs[sym[i].st_shndx].sh_addr;
60019 + pax_open_kernel();
60020 sym[i].st_value += secbase;
60021 + pax_close_kernel();
60022 break;
60023 }
60024 }
60025
60026 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60027 + if (is_fs_load && !register_filesystem_found) {
60028 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
60029 + ret = -EPERM;
60030 + }
60031 +#endif
60032 +
60033 return ret;
60034 }
60035
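(Editorial note, not part of the patch.) The MODHARDEN hunk above relies on a marker string such as "grsec_modharden_fs" being appended to the module arguments when a filesystem module is auto-loaded through mount(); simplify_symbols() strips the marker and then refuses the load unless the module actually references register_filesystem(). A small, self-contained sketch of the marker-stripping step, using the same memmove() trick as the hunk:

#include <string.h>

/* Illustrative only: remove a marker from an argument string in place,
 * keeping the remainder of the string (including its terminating NUL). */
static int strip_marker(char *args, const char *marker)
{
	char *p = strstr(args, marker);
	char *rest;

	if (!p)
		return 0;
	rest = p + strlen(marker);
	memmove(p, rest, strlen(rest) + 1);
	return 1;
}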
60036 @@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
60037 || s->sh_entsize != ~0UL
60038 || strstarts(secstrings + s->sh_name, ".init"))
60039 continue;
60040 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
60041 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60042 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
60043 + else
60044 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
60045 DEBUGP("\t%s\n", secstrings + s->sh_name);
60046 }
60047 - if (m == 0)
60048 - mod->core_text_size = mod->core_size;
60049 }
60050
60051 DEBUGP("Init section allocation order:\n");
60052 @@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
60053 || s->sh_entsize != ~0UL
60054 || !strstarts(secstrings + s->sh_name, ".init"))
60055 continue;
60056 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
60057 - | INIT_OFFSET_MASK);
60058 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60059 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
60060 + else
60061 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
60062 + s->sh_entsize |= INIT_OFFSET_MASK;
60063 DEBUGP("\t%s\n", secstrings + s->sh_name);
60064 }
60065 - if (m == 0)
60066 - mod->init_text_size = mod->init_size;
60067 }
60068 }
60069
60070 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
60071
60072 /* As per nm */
60073 static char elf_type(const Elf_Sym *sym,
60074 - Elf_Shdr *sechdrs,
60075 - const char *secstrings,
60076 - struct module *mod)
60077 + const Elf_Shdr *sechdrs,
60078 + const char *secstrings)
60079 {
60080 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
60081 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
60082 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
60083
60084 /* Put symbol section at end of init part of module. */
60085 symsect->sh_flags |= SHF_ALLOC;
60086 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
60087 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
60088 symindex) | INIT_OFFSET_MASK;
60089 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
60090
60091 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
60092 }
60093
60094 /* Append room for core symbols at end of core part. */
60095 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
60096 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
60097 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
60098 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
60099
60100 /* Put string table section at end of init part of module. */
60101 strsect->sh_flags |= SHF_ALLOC;
60102 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
60103 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
60104 strindex) | INIT_OFFSET_MASK;
60105 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
60106
60107 /* Append room for core symbols' strings at end of core part. */
60108 - *pstroffs = mod->core_size;
60109 + *pstroffs = mod->core_size_rx;
60110 __set_bit(0, strmap);
60111 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
60112 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
60113
60114 return symoffs;
60115 }
60116 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
60117 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
60118 mod->strtab = (void *)sechdrs[strindex].sh_addr;
60119
60120 + pax_open_kernel();
60121 +
60122 /* Set types up while we still have access to sections. */
60123 for (i = 0; i < mod->num_symtab; i++)
60124 mod->symtab[i].st_info
60125 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
60126 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
60127
60128 - mod->core_symtab = dst = mod->module_core + symoffs;
60129 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
60130 src = mod->symtab;
60131 *dst = *src;
60132 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
60133 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
60134 }
60135 mod->core_num_syms = ndst;
60136
60137 - mod->core_strtab = s = mod->module_core + stroffs;
60138 + mod->core_strtab = s = mod->module_core_rx + stroffs;
60139 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
60140 if (test_bit(i, strmap))
60141 *++s = mod->strtab[i];
60142 +
60143 + pax_close_kernel();
60144 }
60145 #else
60146 static inline unsigned long layout_symtab(struct module *mod,
60147 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
60148 #endif
60149 }
60150
60151 -static void *module_alloc_update_bounds(unsigned long size)
60152 +static void *module_alloc_update_bounds_rw(unsigned long size)
60153 {
60154 void *ret = module_alloc(size);
60155
60156 if (ret) {
60157 /* Update module bounds. */
60158 - if ((unsigned long)ret < module_addr_min)
60159 - module_addr_min = (unsigned long)ret;
60160 - if ((unsigned long)ret + size > module_addr_max)
60161 - module_addr_max = (unsigned long)ret + size;
60162 + if ((unsigned long)ret < module_addr_min_rw)
60163 + module_addr_min_rw = (unsigned long)ret;
60164 + if ((unsigned long)ret + size > module_addr_max_rw)
60165 + module_addr_max_rw = (unsigned long)ret + size;
60166 + }
60167 + return ret;
60168 +}
60169 +
60170 +static void *module_alloc_update_bounds_rx(unsigned long size)
60171 +{
60172 + void *ret = module_alloc_exec(size);
60173 +
60174 + if (ret) {
60175 + /* Update module bounds. */
60176 + if ((unsigned long)ret < module_addr_min_rx)
60177 + module_addr_min_rx = (unsigned long)ret;
60178 + if ((unsigned long)ret + size > module_addr_max_rx)
60179 + module_addr_max_rx = (unsigned long)ret + size;
60180 }
60181 return ret;
60182 }
60183 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
60184 unsigned int i;
60185
60186 /* only scan the sections containing data */
60187 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
60188 - (unsigned long)mod->module_core,
60189 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
60190 + (unsigned long)mod->module_core_rw,
60191 sizeof(struct module), GFP_KERNEL);
60192
60193 for (i = 1; i < hdr->e_shnum; i++) {
60194 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
60195 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
60196 continue;
60197
60198 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
60199 - (unsigned long)mod->module_core,
60200 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
60201 + (unsigned long)mod->module_core_rw,
60202 sechdrs[i].sh_size, GFP_KERNEL);
60203 }
60204 }
60205 @@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
60206 secstrings, &stroffs, strmap);
60207
60208 /* Do the allocs. */
60209 - ptr = module_alloc_update_bounds(mod->core_size);
60210 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
60211 /*
60212 * The pointer to this block is stored in the module structure
60213 * which is inside the block. Just mark it as not being a
60214 @@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
60215 err = -ENOMEM;
60216 goto free_percpu;
60217 }
60218 - memset(ptr, 0, mod->core_size);
60219 - mod->module_core = ptr;
60220 + memset(ptr, 0, mod->core_size_rw);
60221 + mod->module_core_rw = ptr;
60222
60223 - ptr = module_alloc_update_bounds(mod->init_size);
60224 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
60225 /*
60226 * The pointer to this block is stored in the module structure
60227 * which is inside the block. This block doesn't need to be
60228 * scanned as it contains data and code that will be freed
60229 * after the module is initialized.
60230 */
60231 - kmemleak_ignore(ptr);
60232 - if (!ptr && mod->init_size) {
60233 + kmemleak_not_leak(ptr);
60234 + if (!ptr && mod->init_size_rw) {
60235 + err = -ENOMEM;
60236 + goto free_core_rw;
60237 + }
60238 + memset(ptr, 0, mod->init_size_rw);
60239 + mod->module_init_rw = ptr;
60240 +
60241 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
60242 + kmemleak_not_leak(ptr);
60243 + if (!ptr) {
60244 err = -ENOMEM;
60245 - goto free_core;
60246 + goto free_init_rw;
60247 }
60248 - memset(ptr, 0, mod->init_size);
60249 - mod->module_init = ptr;
60250 +
60251 + pax_open_kernel();
60252 + memset(ptr, 0, mod->core_size_rx);
60253 + pax_close_kernel();
60254 + mod->module_core_rx = ptr;
60255 +
60256 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
60257 + kmemleak_not_leak(ptr);
60258 + if (!ptr && mod->init_size_rx) {
60259 + err = -ENOMEM;
60260 + goto free_core_rx;
60261 + }
60262 +
60263 + pax_open_kernel();
60264 + memset(ptr, 0, mod->init_size_rx);
60265 + pax_close_kernel();
60266 + mod->module_init_rx = ptr;
60267
60268 /* Transfer each section which specifies SHF_ALLOC */
60269 DEBUGP("final section addresses:\n");
60270 @@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
60271 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
60272 continue;
60273
60274 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
60275 - dest = mod->module_init
60276 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60277 - else
60278 - dest = mod->module_core + sechdrs[i].sh_entsize;
60279 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
60280 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
60281 + dest = mod->module_init_rw
60282 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60283 + else
60284 + dest = mod->module_init_rx
60285 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60286 + } else {
60287 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
60288 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
60289 + else
60290 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
60291 + }
60292 +
60293 + if (sechdrs[i].sh_type != SHT_NOBITS) {
60294
60295 - if (sechdrs[i].sh_type != SHT_NOBITS)
60296 - memcpy(dest, (void *)sechdrs[i].sh_addr,
60297 - sechdrs[i].sh_size);
60298 +#ifdef CONFIG_PAX_KERNEXEC
60299 +#ifdef CONFIG_X86_64
60300 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
60301 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
60302 +#endif
60303 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
60304 + pax_open_kernel();
60305 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
60306 + pax_close_kernel();
60307 + } else
60308 +#endif
60309 +
60310 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
60311 + }
60312 /* Update sh_addr to point to copy in image. */
60313 - sechdrs[i].sh_addr = (unsigned long)dest;
60314 +
60315 +#ifdef CONFIG_PAX_KERNEXEC
60316 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
60317 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
60318 + else
60319 +#endif
60320 +
60321 + sechdrs[i].sh_addr = (unsigned long)dest;
60322 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
60323 }
60324 /* Module has been moved. */
60325 @@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
60326 mod->name);
60327 if (!mod->refptr) {
60328 err = -ENOMEM;
60329 - goto free_init;
60330 + goto free_init_rx;
60331 }
60332 #endif
60333 /* Now we've moved module, initialize linked lists, etc. */
60334 @@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
60335 /* Set up MODINFO_ATTR fields */
60336 setup_modinfo(mod, sechdrs, infoindex);
60337
60338 + mod->args = args;
60339 +
60340 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60341 + {
60342 + char *p, *p2;
60343 +
60344 + if (strstr(mod->args, "grsec_modharden_netdev")) {
60345 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
60346 + err = -EPERM;
60347 + goto cleanup;
60348 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
60349 + p += strlen("grsec_modharden_normal");
60350 + p2 = strstr(p, "_");
60351 + if (p2) {
60352 + *p2 = '\0';
60353 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
60354 + *p2 = '_';
60355 + }
60356 + err = -EPERM;
60357 + goto cleanup;
60358 + }
60359 + }
60360 +#endif
60361 +
60362 +
60363 /* Fix up syms, so that st_value is a pointer to location. */
60364 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
60365 mod);
60366 @@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
60367
60368 /* Now do relocations. */
60369 for (i = 1; i < hdr->e_shnum; i++) {
60370 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
60371 unsigned int info = sechdrs[i].sh_info;
60372 + strtab = (char *)sechdrs[strindex].sh_addr;
60373
60374 /* Not a valid relocation section? */
60375 if (info >= hdr->e_shnum)
60376 @@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
60377 * Do it before processing of module parameters, so the module
60378 * can provide parameter accessor functions of its own.
60379 */
60380 - if (mod->module_init)
60381 - flush_icache_range((unsigned long)mod->module_init,
60382 - (unsigned long)mod->module_init
60383 - + mod->init_size);
60384 - flush_icache_range((unsigned long)mod->module_core,
60385 - (unsigned long)mod->module_core + mod->core_size);
60386 + if (mod->module_init_rx)
60387 + flush_icache_range((unsigned long)mod->module_init_rx,
60388 + (unsigned long)mod->module_init_rx
60389 + + mod->init_size_rx);
60390 + flush_icache_range((unsigned long)mod->module_core_rx,
60391 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
60392
60393 set_fs(old_fs);
60394
60395 - mod->args = args;
60396 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
60397 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
60398 mod->name);
60399 @@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
60400 free_unload:
60401 module_unload_free(mod);
60402 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
60403 + free_init_rx:
60404 percpu_modfree(mod->refptr);
60405 - free_init:
60406 #endif
60407 - module_free(mod, mod->module_init);
60408 - free_core:
60409 - module_free(mod, mod->module_core);
60410 + module_free_exec(mod, mod->module_init_rx);
60411 + free_core_rx:
60412 + module_free_exec(mod, mod->module_core_rx);
60413 + free_init_rw:
60414 + module_free(mod, mod->module_init_rw);
60415 + free_core_rw:
60416 + module_free(mod, mod->module_core_rw);
60417 /* mod will be freed with core. Don't access it beyond this line! */
60418 free_percpu:
60419 if (percpu)
60420 @@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
60421 mod->symtab = mod->core_symtab;
60422 mod->strtab = mod->core_strtab;
60423 #endif
60424 - module_free(mod, mod->module_init);
60425 - mod->module_init = NULL;
60426 - mod->init_size = 0;
60427 - mod->init_text_size = 0;
60428 + module_free(mod, mod->module_init_rw);
60429 + module_free_exec(mod, mod->module_init_rx);
60430 + mod->module_init_rw = NULL;
60431 + mod->module_init_rx = NULL;
60432 + mod->init_size_rw = 0;
60433 + mod->init_size_rx = 0;
60434 mutex_unlock(&module_mutex);
60435
60436 return 0;
60437 @@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
60438 unsigned long nextval;
60439
60440 /* At worse, next value is at end of module */
60441 - if (within_module_init(addr, mod))
60442 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
60443 + if (within_module_init_rx(addr, mod))
60444 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
60445 + else if (within_module_init_rw(addr, mod))
60446 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
60447 + else if (within_module_core_rx(addr, mod))
60448 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
60449 + else if (within_module_core_rw(addr, mod))
60450 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
60451 else
60452 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
60453 + return NULL;
60454
60455 /* Scan for closest preceeding symbol, and next symbol. (ELF
60456 starts real symbols at 1). */
60457 @@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
60458 char buf[8];
60459
60460 seq_printf(m, "%s %u",
60461 - mod->name, mod->init_size + mod->core_size);
60462 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
60463 print_unload_info(m, mod);
60464
60465 /* Informative for users. */
60466 @@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
60467 mod->state == MODULE_STATE_COMING ? "Loading":
60468 "Live");
60469 /* Used by oprofile and other similar tools. */
60470 - seq_printf(m, " 0x%p", mod->module_core);
60471 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
60472
60473 /* Taints info */
60474 if (mod->taints)
60475 @@ -2981,7 +3128,17 @@ static const struct file_operations proc
60476
60477 static int __init proc_modules_init(void)
60478 {
60479 +#ifndef CONFIG_GRKERNSEC_HIDESYM
60480 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60481 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60482 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60483 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
60484 +#else
60485 proc_create("modules", 0, NULL, &proc_modules_operations);
60486 +#endif
60487 +#else
60488 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60489 +#endif
60490 return 0;
60491 }
60492 module_init(proc_modules_init);
60493 @@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
60494 {
60495 struct module *mod;
60496
60497 - if (addr < module_addr_min || addr > module_addr_max)
60498 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
60499 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
60500 return NULL;
60501
60502 list_for_each_entry_rcu(mod, &modules, list)
60503 - if (within_module_core(addr, mod)
60504 - || within_module_init(addr, mod))
60505 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
60506 return mod;
60507 return NULL;
60508 }
60509 @@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
60510 */
60511 struct module *__module_text_address(unsigned long addr)
60512 {
60513 - struct module *mod = __module_address(addr);
60514 + struct module *mod;
60515 +
60516 +#ifdef CONFIG_X86_32
60517 + addr = ktla_ktva(addr);
60518 +#endif
60519 +
60520 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
60521 + return NULL;
60522 +
60523 + mod = __module_address(addr);
60524 +
60525 if (mod) {
60526 /* Make sure it's within the text section. */
60527 - if (!within(addr, mod->module_init, mod->init_text_size)
60528 - && !within(addr, mod->module_core, mod->core_text_size))
60529 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
60530 mod = NULL;
60531 }
60532 return mod;
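(Editorial note, not part of the patch.) The kernel/module.c changes above replace the single module_core/module_init allocations with an rw/rx pair: writable data goes into module_core_rw/module_init_rw, while code and other read-only sections go into module_core_rx/module_init_rx, which PAX_KERNEXEC keeps non-writable and which are written only between pax_open_kernel()/pax_close_kernel(). A minimal sketch of the resulting bookkeeping; the struct and helper names here are illustrative, while the field names follow the patch:

/* Illustrative only. */
struct split_module_layout {
	void *module_core_rx;		/* code: executable, read-only after load */
	void *module_core_rw;		/* data: writable, never executable */
	unsigned long core_size_rx;
	unsigned long core_size_rw;
};

static inline int within_region(unsigned long addr, const void *base,
				unsigned long size)
{
	return (unsigned long)base <= addr && addr - (unsigned long)base < size;
}

/* Text-address lookups only consult the rx region, mirroring the patched
 * __module_text_address() above. */
static inline int split_module_text_address(unsigned long addr,
					    const struct split_module_layout *m)
{
	return within_region(addr, m->module_core_rx, m->core_size_rx);
}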
60533 diff -urNp linux-2.6.32.42/kernel/mutex.c linux-2.6.32.42/kernel/mutex.c
60534 --- linux-2.6.32.42/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
60535 +++ linux-2.6.32.42/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
60536 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
60537 */
60538
60539 for (;;) {
60540 - struct thread_info *owner;
60541 + struct task_struct *owner;
60542
60543 /*
60544 * If we own the BKL, then don't spin. The owner of
60545 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
60546 spin_lock_mutex(&lock->wait_lock, flags);
60547
60548 debug_mutex_lock_common(lock, &waiter);
60549 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
60550 + debug_mutex_add_waiter(lock, &waiter, task);
60551
60552 /* add waiting tasks to the end of the waitqueue (FIFO): */
60553 list_add_tail(&waiter.list, &lock->wait_list);
60554 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
60555 * TASK_UNINTERRUPTIBLE case.)
60556 */
60557 if (unlikely(signal_pending_state(state, task))) {
60558 - mutex_remove_waiter(lock, &waiter,
60559 - task_thread_info(task));
60560 + mutex_remove_waiter(lock, &waiter, task);
60561 mutex_release(&lock->dep_map, 1, ip);
60562 spin_unlock_mutex(&lock->wait_lock, flags);
60563
60564 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
60565 done:
60566 lock_acquired(&lock->dep_map, ip);
60567 /* got the lock - rejoice! */
60568 - mutex_remove_waiter(lock, &waiter, current_thread_info());
60569 + mutex_remove_waiter(lock, &waiter, task);
60570 mutex_set_owner(lock);
60571
60572 /* set it to 0 if there are no waiters left: */
60573 diff -urNp linux-2.6.32.42/kernel/mutex-debug.c linux-2.6.32.42/kernel/mutex-debug.c
60574 --- linux-2.6.32.42/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
60575 +++ linux-2.6.32.42/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
60576 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
60577 }
60578
60579 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60580 - struct thread_info *ti)
60581 + struct task_struct *task)
60582 {
60583 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
60584
60585 /* Mark the current thread as blocked on the lock: */
60586 - ti->task->blocked_on = waiter;
60587 + task->blocked_on = waiter;
60588 }
60589
60590 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60591 - struct thread_info *ti)
60592 + struct task_struct *task)
60593 {
60594 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
60595 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
60596 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
60597 - ti->task->blocked_on = NULL;
60598 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
60599 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
60600 + task->blocked_on = NULL;
60601
60602 list_del_init(&waiter->list);
60603 waiter->task = NULL;
60604 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
60605 return;
60606
60607 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
60608 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
60609 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
60610 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
60611 mutex_clear_owner(lock);
60612 }
60613 diff -urNp linux-2.6.32.42/kernel/mutex-debug.h linux-2.6.32.42/kernel/mutex-debug.h
60614 --- linux-2.6.32.42/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
60615 +++ linux-2.6.32.42/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
60616 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
60617 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
60618 extern void debug_mutex_add_waiter(struct mutex *lock,
60619 struct mutex_waiter *waiter,
60620 - struct thread_info *ti);
60621 + struct task_struct *task);
60622 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60623 - struct thread_info *ti);
60624 + struct task_struct *task);
60625 extern void debug_mutex_unlock(struct mutex *lock);
60626 extern void debug_mutex_init(struct mutex *lock, const char *name,
60627 struct lock_class_key *key);
60628
60629 static inline void mutex_set_owner(struct mutex *lock)
60630 {
60631 - lock->owner = current_thread_info();
60632 + lock->owner = current;
60633 }
60634
60635 static inline void mutex_clear_owner(struct mutex *lock)
60636 diff -urNp linux-2.6.32.42/kernel/mutex.h linux-2.6.32.42/kernel/mutex.h
60637 --- linux-2.6.32.42/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
60638 +++ linux-2.6.32.42/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
60639 @@ -19,7 +19,7 @@
60640 #ifdef CONFIG_SMP
60641 static inline void mutex_set_owner(struct mutex *lock)
60642 {
60643 - lock->owner = current_thread_info();
60644 + lock->owner = current;
60645 }
60646
60647 static inline void mutex_clear_owner(struct mutex *lock)
60648 diff -urNp linux-2.6.32.42/kernel/panic.c linux-2.6.32.42/kernel/panic.c
60649 --- linux-2.6.32.42/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
60650 +++ linux-2.6.32.42/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
60651 @@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
60652 const char *board;
60653
60654 printk(KERN_WARNING "------------[ cut here ]------------\n");
60655 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
60656 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
60657 board = dmi_get_system_info(DMI_PRODUCT_NAME);
60658 if (board)
60659 printk(KERN_WARNING "Hardware name: %s\n", board);
60660 @@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
60661 */
60662 void __stack_chk_fail(void)
60663 {
60664 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
60665 + dump_stack();
60666 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
60667 __builtin_return_address(0));
60668 }
60669 EXPORT_SYMBOL(__stack_chk_fail);
60670 diff -urNp linux-2.6.32.42/kernel/params.c linux-2.6.32.42/kernel/params.c
60671 --- linux-2.6.32.42/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
60672 +++ linux-2.6.32.42/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
60673 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
60674 return ret;
60675 }
60676
60677 -static struct sysfs_ops module_sysfs_ops = {
60678 +static const struct sysfs_ops module_sysfs_ops = {
60679 .show = module_attr_show,
60680 .store = module_attr_store,
60681 };
60682 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
60683 return 0;
60684 }
60685
60686 -static struct kset_uevent_ops module_uevent_ops = {
60687 +static const struct kset_uevent_ops module_uevent_ops = {
60688 .filter = uevent_filter,
60689 };
60690
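(Editorial note, not part of the patch.) The params.c hunks are part of the patch's wider constification work: operations tables that are only ever read are declared const so they end up in a read-only section, where KERNEXEC-style write protection prevents their function pointers from being retargeted at runtime. A minimal, self-contained sketch of the pattern:

#include <sys/types.h>

/* Illustrative only. */
struct example_ops {
	ssize_t (*show)(char *buf);
	ssize_t (*store)(const char *buf, size_t len);
};

static ssize_t example_show(char *buf) { buf[0] = '\0'; return 0; }
static ssize_t example_store(const char *buf, size_t len) { (void)buf; return (ssize_t)len; }

/* const places the table in a read-only section instead of writable data */
static const struct example_ops example_ops_table = {
	.show	= example_show,
	.store	= example_store,
};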
60691 diff -urNp linux-2.6.32.42/kernel/perf_event.c linux-2.6.32.42/kernel/perf_event.c
60692 --- linux-2.6.32.42/kernel/perf_event.c 2011-04-17 17:00:52.000000000 -0400
60693 +++ linux-2.6.32.42/kernel/perf_event.c 2011-05-04 17:56:28.000000000 -0400
60694 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
60695 */
60696 int sysctl_perf_event_sample_rate __read_mostly = 100000;
60697
60698 -static atomic64_t perf_event_id;
60699 +static atomic64_unchecked_t perf_event_id;
60700
60701 /*
60702 * Lock for (sysadmin-configurable) event reservations:
60703 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
60704 * In order to keep per-task stats reliable we need to flip the event
60705 * values when we flip the contexts.
60706 */
60707 - value = atomic64_read(&next_event->count);
60708 - value = atomic64_xchg(&event->count, value);
60709 - atomic64_set(&next_event->count, value);
60710 + value = atomic64_read_unchecked(&next_event->count);
60711 + value = atomic64_xchg_unchecked(&event->count, value);
60712 + atomic64_set_unchecked(&next_event->count, value);
60713
60714 swap(event->total_time_enabled, next_event->total_time_enabled);
60715 swap(event->total_time_running, next_event->total_time_running);
60716 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
60717 update_event_times(event);
60718 }
60719
60720 - return atomic64_read(&event->count);
60721 + return atomic64_read_unchecked(&event->count);
60722 }
60723
60724 /*
60725 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
60726 values[n++] = 1 + leader->nr_siblings;
60727 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60728 values[n++] = leader->total_time_enabled +
60729 - atomic64_read(&leader->child_total_time_enabled);
60730 + atomic64_read_unchecked(&leader->child_total_time_enabled);
60731 }
60732 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60733 values[n++] = leader->total_time_running +
60734 - atomic64_read(&leader->child_total_time_running);
60735 + atomic64_read_unchecked(&leader->child_total_time_running);
60736 }
60737
60738 size = n * sizeof(u64);
60739 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
60740 values[n++] = perf_event_read_value(event);
60741 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60742 values[n++] = event->total_time_enabled +
60743 - atomic64_read(&event->child_total_time_enabled);
60744 + atomic64_read_unchecked(&event->child_total_time_enabled);
60745 }
60746 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60747 values[n++] = event->total_time_running +
60748 - atomic64_read(&event->child_total_time_running);
60749 + atomic64_read_unchecked(&event->child_total_time_running);
60750 }
60751 if (read_format & PERF_FORMAT_ID)
60752 values[n++] = primary_event_id(event);
60753 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
60754 static void perf_event_reset(struct perf_event *event)
60755 {
60756 (void)perf_event_read(event);
60757 - atomic64_set(&event->count, 0);
60758 + atomic64_set_unchecked(&event->count, 0);
60759 perf_event_update_userpage(event);
60760 }
60761
60762 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
60763 ++userpg->lock;
60764 barrier();
60765 userpg->index = perf_event_index(event);
60766 - userpg->offset = atomic64_read(&event->count);
60767 + userpg->offset = atomic64_read_unchecked(&event->count);
60768 if (event->state == PERF_EVENT_STATE_ACTIVE)
60769 - userpg->offset -= atomic64_read(&event->hw.prev_count);
60770 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
60771
60772 userpg->time_enabled = event->total_time_enabled +
60773 - atomic64_read(&event->child_total_time_enabled);
60774 + atomic64_read_unchecked(&event->child_total_time_enabled);
60775
60776 userpg->time_running = event->total_time_running +
60777 - atomic64_read(&event->child_total_time_running);
60778 + atomic64_read_unchecked(&event->child_total_time_running);
60779
60780 barrier();
60781 ++userpg->lock;
60782 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
60783 u64 values[4];
60784 int n = 0;
60785
60786 - values[n++] = atomic64_read(&event->count);
60787 + values[n++] = atomic64_read_unchecked(&event->count);
60788 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60789 values[n++] = event->total_time_enabled +
60790 - atomic64_read(&event->child_total_time_enabled);
60791 + atomic64_read_unchecked(&event->child_total_time_enabled);
60792 }
60793 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60794 values[n++] = event->total_time_running +
60795 - atomic64_read(&event->child_total_time_running);
60796 + atomic64_read_unchecked(&event->child_total_time_running);
60797 }
60798 if (read_format & PERF_FORMAT_ID)
60799 values[n++] = primary_event_id(event);
60800 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
60801 if (leader != event)
60802 leader->pmu->read(leader);
60803
60804 - values[n++] = atomic64_read(&leader->count);
60805 + values[n++] = atomic64_read_unchecked(&leader->count);
60806 if (read_format & PERF_FORMAT_ID)
60807 values[n++] = primary_event_id(leader);
60808
60809 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
60810 if (sub != event)
60811 sub->pmu->read(sub);
60812
60813 - values[n++] = atomic64_read(&sub->count);
60814 + values[n++] = atomic64_read_unchecked(&sub->count);
60815 if (read_format & PERF_FORMAT_ID)
60816 values[n++] = primary_event_id(sub);
60817
60818 @@ -3787,7 +3787,7 @@ static void perf_swevent_add(struct perf
60819 {
60820 struct hw_perf_event *hwc = &event->hw;
60821
60822 - atomic64_add(nr, &event->count);
60823 + atomic64_add_unchecked(nr, &event->count);
60824
60825 if (!hwc->sample_period)
60826 return;
60827 @@ -4044,9 +4044,9 @@ static void cpu_clock_perf_event_update(
60828 u64 now;
60829
60830 now = cpu_clock(cpu);
60831 - prev = atomic64_read(&event->hw.prev_count);
60832 - atomic64_set(&event->hw.prev_count, now);
60833 - atomic64_add(now - prev, &event->count);
60834 + prev = atomic64_read_unchecked(&event->hw.prev_count);
60835 + atomic64_set_unchecked(&event->hw.prev_count, now);
60836 + atomic64_add_unchecked(now - prev, &event->count);
60837 }
60838
60839 static int cpu_clock_perf_event_enable(struct perf_event *event)
60840 @@ -4054,7 +4054,7 @@ static int cpu_clock_perf_event_enable(s
60841 struct hw_perf_event *hwc = &event->hw;
60842 int cpu = raw_smp_processor_id();
60843
60844 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
60845 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
60846 perf_swevent_start_hrtimer(event);
60847
60848 return 0;
60849 @@ -4086,9 +4086,9 @@ static void task_clock_perf_event_update
60850 u64 prev;
60851 s64 delta;
60852
60853 - prev = atomic64_xchg(&event->hw.prev_count, now);
60854 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
60855 delta = now - prev;
60856 - atomic64_add(delta, &event->count);
60857 + atomic64_add_unchecked(delta, &event->count);
60858 }
60859
60860 static int task_clock_perf_event_enable(struct perf_event *event)
60861 @@ -4098,7 +4098,7 @@ static int task_clock_perf_event_enable(
60862
60863 now = event->ctx->time;
60864
60865 - atomic64_set(&hwc->prev_count, now);
60866 + atomic64_set_unchecked(&hwc->prev_count, now);
60867
60868 perf_swevent_start_hrtimer(event);
60869
60870 @@ -4293,7 +4293,7 @@ perf_event_alloc(struct perf_event_attr
60871 event->parent = parent_event;
60872
60873 event->ns = get_pid_ns(current->nsproxy->pid_ns);
60874 - event->id = atomic64_inc_return(&perf_event_id);
60875 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
60876
60877 event->state = PERF_EVENT_STATE_INACTIVE;
60878
60879 @@ -4724,15 +4724,15 @@ static void sync_child_event(struct perf
60880 if (child_event->attr.inherit_stat)
60881 perf_event_read_event(child_event, child);
60882
60883 - child_val = atomic64_read(&child_event->count);
60884 + child_val = atomic64_read_unchecked(&child_event->count);
60885
60886 /*
60887 * Add back the child's count to the parent's count:
60888 */
60889 - atomic64_add(child_val, &parent_event->count);
60890 - atomic64_add(child_event->total_time_enabled,
60891 + atomic64_add_unchecked(child_val, &parent_event->count);
60892 + atomic64_add_unchecked(child_event->total_time_enabled,
60893 &parent_event->child_total_time_enabled);
60894 - atomic64_add(child_event->total_time_running,
60895 + atomic64_add_unchecked(child_event->total_time_running,
60896 &parent_event->child_total_time_running);
60897
60898 /*
60899 diff -urNp linux-2.6.32.42/kernel/pid.c linux-2.6.32.42/kernel/pid.c
60900 --- linux-2.6.32.42/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
60901 +++ linux-2.6.32.42/kernel/pid.c 2011-04-18 19:22:38.000000000 -0400
60902 @@ -33,6 +33,7 @@
60903 #include <linux/rculist.h>
60904 #include <linux/bootmem.h>
60905 #include <linux/hash.h>
60906 +#include <linux/security.h>
60907 #include <linux/pid_namespace.h>
60908 #include <linux/init_task.h>
60909 #include <linux/syscalls.h>
60910 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
60911
60912 int pid_max = PID_MAX_DEFAULT;
60913
60914 -#define RESERVED_PIDS 300
60915 +#define RESERVED_PIDS 500
60916
60917 int pid_max_min = RESERVED_PIDS + 1;
60918 int pid_max_max = PID_MAX_LIMIT;
60919 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
60920 */
60921 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
60922 {
60923 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
60924 + struct task_struct *task;
60925 +
60926 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
60927 +
60928 + if (gr_pid_is_chrooted(task))
60929 + return NULL;
60930 +
60931 + return task;
60932 }
60933
60934 struct task_struct *find_task_by_vpid(pid_t vnr)
60935 diff -urNp linux-2.6.32.42/kernel/posix-cpu-timers.c linux-2.6.32.42/kernel/posix-cpu-timers.c
60936 --- linux-2.6.32.42/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
60937 +++ linux-2.6.32.42/kernel/posix-cpu-timers.c 2011-04-17 15:56:46.000000000 -0400
60938 @@ -6,6 +6,7 @@
60939 #include <linux/posix-timers.h>
60940 #include <linux/errno.h>
60941 #include <linux/math64.h>
60942 +#include <linux/security.h>
60943 #include <asm/uaccess.h>
60944 #include <linux/kernel_stat.h>
60945 #include <trace/events/timer.h>
60946 diff -urNp linux-2.6.32.42/kernel/posix-timers.c linux-2.6.32.42/kernel/posix-timers.c
60947 --- linux-2.6.32.42/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
60948 +++ linux-2.6.32.42/kernel/posix-timers.c 2011-05-16 21:46:57.000000000 -0400
60949 @@ -42,6 +42,7 @@
60950 #include <linux/compiler.h>
60951 #include <linux/idr.h>
60952 #include <linux/posix-timers.h>
60953 +#include <linux/grsecurity.h>
60954 #include <linux/syscalls.h>
60955 #include <linux/wait.h>
60956 #include <linux/workqueue.h>
60957 @@ -296,6 +297,8 @@ static __init int init_posix_timers(void
60958 .nsleep = no_nsleep,
60959 };
60960
60961 + pax_track_stack();
60962 +
60963 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
60964 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
60965 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
60966 @@ -948,6 +951,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
60967 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
60968 return -EFAULT;
60969
60970 + /* only the CLOCK_REALTIME clock can be set, all other clocks
60971 + have their clock_set fptr set to a nosettime dummy function
60972 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
60973 + call common_clock_set, which calls do_sys_settimeofday, which
60974 + we hook
60975 + */
60976 +
60977 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
60978 }
60979
60980 diff -urNp linux-2.6.32.42/kernel/power/hibernate.c linux-2.6.32.42/kernel/power/hibernate.c
60981 --- linux-2.6.32.42/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
60982 +++ linux-2.6.32.42/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
60983 @@ -48,14 +48,14 @@ enum {
60984
60985 static int hibernation_mode = HIBERNATION_SHUTDOWN;
60986
60987 -static struct platform_hibernation_ops *hibernation_ops;
60988 +static const struct platform_hibernation_ops *hibernation_ops;
60989
60990 /**
60991 * hibernation_set_ops - set the global hibernate operations
60992 * @ops: the hibernation operations to use in subsequent hibernation transitions
60993 */
60994
60995 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
60996 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
60997 {
60998 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
60999 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
61000 diff -urNp linux-2.6.32.42/kernel/power/poweroff.c linux-2.6.32.42/kernel/power/poweroff.c
61001 --- linux-2.6.32.42/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
61002 +++ linux-2.6.32.42/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
61003 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
61004 .enable_mask = SYSRQ_ENABLE_BOOT,
61005 };
61006
61007 -static int pm_sysrq_init(void)
61008 +static int __init pm_sysrq_init(void)
61009 {
61010 register_sysrq_key('o', &sysrq_poweroff_op);
61011 return 0;
61012 diff -urNp linux-2.6.32.42/kernel/power/process.c linux-2.6.32.42/kernel/power/process.c
61013 --- linux-2.6.32.42/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
61014 +++ linux-2.6.32.42/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
61015 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
61016 struct timeval start, end;
61017 u64 elapsed_csecs64;
61018 unsigned int elapsed_csecs;
61019 + bool timedout = false;
61020
61021 do_gettimeofday(&start);
61022
61023 end_time = jiffies + TIMEOUT;
61024 do {
61025 todo = 0;
61026 + if (time_after(jiffies, end_time))
61027 + timedout = true;
61028 read_lock(&tasklist_lock);
61029 do_each_thread(g, p) {
61030 if (frozen(p) || !freezeable(p))
61031 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
61032 * It is "frozen enough". If the task does wake
61033 * up, it will immediately call try_to_freeze.
61034 */
61035 - if (!task_is_stopped_or_traced(p) &&
61036 - !freezer_should_skip(p))
61037 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
61038 todo++;
61039 + if (timedout) {
61040 + printk(KERN_ERR "Task refusing to freeze:\n");
61041 + sched_show_task(p);
61042 + }
61043 + }
61044 } while_each_thread(g, p);
61045 read_unlock(&tasklist_lock);
61046 yield(); /* Yield is okay here */
61047 - if (time_after(jiffies, end_time))
61048 - break;
61049 - } while (todo);
61050 + } while (todo && !timedout);
61051
61052 do_gettimeofday(&end);
61053 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
61054 diff -urNp linux-2.6.32.42/kernel/power/suspend.c linux-2.6.32.42/kernel/power/suspend.c
61055 --- linux-2.6.32.42/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
61056 +++ linux-2.6.32.42/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
61057 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
61058 [PM_SUSPEND_MEM] = "mem",
61059 };
61060
61061 -static struct platform_suspend_ops *suspend_ops;
61062 +static const struct platform_suspend_ops *suspend_ops;
61063
61064 /**
61065 * suspend_set_ops - Set the global suspend method table.
61066 * @ops: Pointer to ops structure.
61067 */
61068 -void suspend_set_ops(struct platform_suspend_ops *ops)
61069 +void suspend_set_ops(const struct platform_suspend_ops *ops)
61070 {
61071 mutex_lock(&pm_mutex);
61072 suspend_ops = ops;
61073 diff -urNp linux-2.6.32.42/kernel/printk.c linux-2.6.32.42/kernel/printk.c
61074 --- linux-2.6.32.42/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
61075 +++ linux-2.6.32.42/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
61076 @@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
61077 char c;
61078 int error = 0;
61079
61080 +#ifdef CONFIG_GRKERNSEC_DMESG
61081 + if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
61082 + return -EPERM;
61083 +#endif
61084 +
61085 error = security_syslog(type);
61086 if (error)
61087 return error;
61088 diff -urNp linux-2.6.32.42/kernel/profile.c linux-2.6.32.42/kernel/profile.c
61089 --- linux-2.6.32.42/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
61090 +++ linux-2.6.32.42/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
61091 @@ -39,7 +39,7 @@ struct profile_hit {
61092 /* Oprofile timer tick hook */
61093 static int (*timer_hook)(struct pt_regs *) __read_mostly;
61094
61095 -static atomic_t *prof_buffer;
61096 +static atomic_unchecked_t *prof_buffer;
61097 static unsigned long prof_len, prof_shift;
61098
61099 int prof_on __read_mostly;
61100 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
61101 hits[i].pc = 0;
61102 continue;
61103 }
61104 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61105 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61106 hits[i].hits = hits[i].pc = 0;
61107 }
61108 }
61109 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
61110 * Add the current hit(s) and flush the write-queue out
61111 * to the global buffer:
61112 */
61113 - atomic_add(nr_hits, &prof_buffer[pc]);
61114 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
61115 for (i = 0; i < NR_PROFILE_HIT; ++i) {
61116 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61117 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61118 hits[i].pc = hits[i].hits = 0;
61119 }
61120 out:
61121 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
61122 if (prof_on != type || !prof_buffer)
61123 return;
61124 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
61125 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61126 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61127 }
61128 #endif /* !CONFIG_SMP */
61129 EXPORT_SYMBOL_GPL(profile_hits);
61130 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
61131 return -EFAULT;
61132 buf++; p++; count--; read++;
61133 }
61134 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
61135 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
61136 if (copy_to_user(buf, (void *)pnt, count))
61137 return -EFAULT;
61138 read += count;
61139 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
61140 }
61141 #endif
61142 profile_discard_flip_buffers();
61143 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
61144 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
61145 return count;
61146 }
61147
61148 diff -urNp linux-2.6.32.42/kernel/ptrace.c linux-2.6.32.42/kernel/ptrace.c
61149 --- linux-2.6.32.42/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
61150 +++ linux-2.6.32.42/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
61151 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
61152 return ret;
61153 }
61154
61155 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
61156 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
61157 + unsigned int log)
61158 {
61159 const struct cred *cred = current_cred(), *tcred;
61160
61161 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
61162 cred->gid != tcred->egid ||
61163 cred->gid != tcred->sgid ||
61164 cred->gid != tcred->gid) &&
61165 - !capable(CAP_SYS_PTRACE)) {
61166 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61167 + (log && !capable(CAP_SYS_PTRACE)))
61168 + ) {
61169 rcu_read_unlock();
61170 return -EPERM;
61171 }
61172 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
61173 smp_rmb();
61174 if (task->mm)
61175 dumpable = get_dumpable(task->mm);
61176 - if (!dumpable && !capable(CAP_SYS_PTRACE))
61177 + if (!dumpable &&
61178 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61179 + (log && !capable(CAP_SYS_PTRACE))))
61180 return -EPERM;
61181
61182 return security_ptrace_access_check(task, mode);
61183 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
61184 {
61185 int err;
61186 task_lock(task);
61187 - err = __ptrace_may_access(task, mode);
61188 + err = __ptrace_may_access(task, mode, 0);
61189 + task_unlock(task);
61190 + return !err;
61191 +}
61192 +
61193 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
61194 +{
61195 + int err;
61196 + task_lock(task);
61197 + err = __ptrace_may_access(task, mode, 1);
61198 task_unlock(task);
61199 return !err;
61200 }
61201 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
61202 goto out;
61203
61204 task_lock(task);
61205 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
61206 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
61207 task_unlock(task);
61208 if (retval)
61209 goto unlock_creds;
61210 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
61211 goto unlock_tasklist;
61212
61213 task->ptrace = PT_PTRACED;
61214 - if (capable(CAP_SYS_PTRACE))
61215 + if (capable_nolog(CAP_SYS_PTRACE))
61216 task->ptrace |= PT_PTRACE_CAP;
61217
61218 __ptrace_link(task, current);
61219 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
61220 {
61221 int copied = 0;
61222
61223 + pax_track_stack();
61224 +
61225 while (len > 0) {
61226 char buf[128];
61227 int this_len, retval;
61228 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
61229 {
61230 int copied = 0;
61231
61232 + pax_track_stack();
61233 +
61234 while (len > 0) {
61235 char buf[128];
61236 int this_len, retval;
61237 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
61238 int ret = -EIO;
61239 siginfo_t siginfo;
61240
61241 + pax_track_stack();
61242 +
61243 switch (request) {
61244 case PTRACE_PEEKTEXT:
61245 case PTRACE_PEEKDATA:
61246 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
61247 ret = ptrace_setoptions(child, data);
61248 break;
61249 case PTRACE_GETEVENTMSG:
61250 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
61251 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
61252 break;
61253
61254 case PTRACE_GETSIGINFO:
61255 ret = ptrace_getsiginfo(child, &siginfo);
61256 if (!ret)
61257 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
61258 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
61259 &siginfo);
61260 break;
61261
61262 case PTRACE_SETSIGINFO:
61263 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
61264 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
61265 sizeof siginfo))
61266 ret = -EFAULT;
61267 else
61268 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
61269 goto out;
61270 }
61271
61272 + if (gr_handle_ptrace(child, request)) {
61273 + ret = -EPERM;
61274 + goto out_put_task_struct;
61275 + }
61276 +
61277 if (request == PTRACE_ATTACH) {
61278 ret = ptrace_attach(child);
61279 /*
61280 * Some architectures need to do book-keeping after
61281 * a ptrace attach.
61282 */
61283 - if (!ret)
61284 + if (!ret) {
61285 arch_ptrace_attach(child);
61286 + gr_audit_ptrace(child);
61287 + }
61288 goto out_put_task_struct;
61289 }
61290
61291 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
61292 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
61293 if (copied != sizeof(tmp))
61294 return -EIO;
61295 - return put_user(tmp, (unsigned long __user *)data);
61296 + return put_user(tmp, (__force unsigned long __user *)data);
61297 }
61298
61299 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
61300 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
61301 siginfo_t siginfo;
61302 int ret;
61303
61304 + pax_track_stack();
61305 +
61306 switch (request) {
61307 case PTRACE_PEEKTEXT:
61308 case PTRACE_PEEKDATA:
61309 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
61310 goto out;
61311 }
61312
61313 + if (gr_handle_ptrace(child, request)) {
61314 + ret = -EPERM;
61315 + goto out_put_task_struct;
61316 + }
61317 +
61318 if (request == PTRACE_ATTACH) {
61319 ret = ptrace_attach(child);
61320 /*
61321 * Some architectures need to do book-keeping after
61322 * a ptrace attach.
61323 */
61324 - if (!ret)
61325 + if (!ret) {
61326 arch_ptrace_attach(child);
61327 + gr_audit_ptrace(child);
61328 + }
61329 goto out_put_task_struct;
61330 }
61331
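(Editorial note, not part of the patch.) The ptrace changes above thread a log flag through __ptrace_may_access() so that the attach path checks capability with capable(CAP_SYS_PTRACE), while the read-only ptrace_may_access() path uses capable_nolog(), the variant used throughout this patch to perform the same check without generating a grsecurity log entry. A small, self-contained sketch of the flag-selected check; the helpers here are stand-ins, not the real kernel functions:

#include <stdbool.h>

/* Illustrative stand-ins for capable() / capable_nolog(). */
static bool cap_check_logged(int cap) { (void)cap; return false; }
static bool cap_check_silent(int cap) { (void)cap; return false; }

static int may_access(bool same_creds, bool log_denials, int cap)
{
	bool privileged = log_denials ? cap_check_logged(cap)
				      : cap_check_silent(cap);

	if (!same_creds && !privileged)
		return -1;	/* -EPERM in the kernel */

	return 0;
}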
61332 diff -urNp linux-2.6.32.42/kernel/rcutorture.c linux-2.6.32.42/kernel/rcutorture.c
61333 --- linux-2.6.32.42/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
61334 +++ linux-2.6.32.42/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
61335 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
61336 { 0 };
61337 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
61338 { 0 };
61339 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61340 -static atomic_t n_rcu_torture_alloc;
61341 -static atomic_t n_rcu_torture_alloc_fail;
61342 -static atomic_t n_rcu_torture_free;
61343 -static atomic_t n_rcu_torture_mberror;
61344 -static atomic_t n_rcu_torture_error;
61345 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61346 +static atomic_unchecked_t n_rcu_torture_alloc;
61347 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
61348 +static atomic_unchecked_t n_rcu_torture_free;
61349 +static atomic_unchecked_t n_rcu_torture_mberror;
61350 +static atomic_unchecked_t n_rcu_torture_error;
61351 static long n_rcu_torture_timers;
61352 static struct list_head rcu_torture_removed;
61353 static cpumask_var_t shuffle_tmp_mask;
61354 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
61355
61356 spin_lock_bh(&rcu_torture_lock);
61357 if (list_empty(&rcu_torture_freelist)) {
61358 - atomic_inc(&n_rcu_torture_alloc_fail);
61359 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
61360 spin_unlock_bh(&rcu_torture_lock);
61361 return NULL;
61362 }
61363 - atomic_inc(&n_rcu_torture_alloc);
61364 + atomic_inc_unchecked(&n_rcu_torture_alloc);
61365 p = rcu_torture_freelist.next;
61366 list_del_init(p);
61367 spin_unlock_bh(&rcu_torture_lock);
61368 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
61369 static void
61370 rcu_torture_free(struct rcu_torture *p)
61371 {
61372 - atomic_inc(&n_rcu_torture_free);
61373 + atomic_inc_unchecked(&n_rcu_torture_free);
61374 spin_lock_bh(&rcu_torture_lock);
61375 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
61376 spin_unlock_bh(&rcu_torture_lock);
61377 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
61378 i = rp->rtort_pipe_count;
61379 if (i > RCU_TORTURE_PIPE_LEN)
61380 i = RCU_TORTURE_PIPE_LEN;
61381 - atomic_inc(&rcu_torture_wcount[i]);
61382 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61383 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61384 rp->rtort_mbtest = 0;
61385 rcu_torture_free(rp);
61386 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
61387 i = rp->rtort_pipe_count;
61388 if (i > RCU_TORTURE_PIPE_LEN)
61389 i = RCU_TORTURE_PIPE_LEN;
61390 - atomic_inc(&rcu_torture_wcount[i]);
61391 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61392 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61393 rp->rtort_mbtest = 0;
61394 list_del(&rp->rtort_free);
61395 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
61396 i = old_rp->rtort_pipe_count;
61397 if (i > RCU_TORTURE_PIPE_LEN)
61398 i = RCU_TORTURE_PIPE_LEN;
61399 - atomic_inc(&rcu_torture_wcount[i]);
61400 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61401 old_rp->rtort_pipe_count++;
61402 cur_ops->deferred_free(old_rp);
61403 }
61404 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
61405 return;
61406 }
61407 if (p->rtort_mbtest == 0)
61408 - atomic_inc(&n_rcu_torture_mberror);
61409 + atomic_inc_unchecked(&n_rcu_torture_mberror);
61410 spin_lock(&rand_lock);
61411 cur_ops->read_delay(&rand);
61412 n_rcu_torture_timers++;
61413 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
61414 continue;
61415 }
61416 if (p->rtort_mbtest == 0)
61417 - atomic_inc(&n_rcu_torture_mberror);
61418 + atomic_inc_unchecked(&n_rcu_torture_mberror);
61419 cur_ops->read_delay(&rand);
61420 preempt_disable();
61421 pipe_count = p->rtort_pipe_count;
61422 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
61423 rcu_torture_current,
61424 rcu_torture_current_version,
61425 list_empty(&rcu_torture_freelist),
61426 - atomic_read(&n_rcu_torture_alloc),
61427 - atomic_read(&n_rcu_torture_alloc_fail),
61428 - atomic_read(&n_rcu_torture_free),
61429 - atomic_read(&n_rcu_torture_mberror),
61430 + atomic_read_unchecked(&n_rcu_torture_alloc),
61431 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
61432 + atomic_read_unchecked(&n_rcu_torture_free),
61433 + atomic_read_unchecked(&n_rcu_torture_mberror),
61434 n_rcu_torture_timers);
61435 - if (atomic_read(&n_rcu_torture_mberror) != 0)
61436 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
61437 cnt += sprintf(&page[cnt], " !!!");
61438 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
61439 if (i > 1) {
61440 cnt += sprintf(&page[cnt], "!!! ");
61441 - atomic_inc(&n_rcu_torture_error);
61442 + atomic_inc_unchecked(&n_rcu_torture_error);
61443 WARN_ON_ONCE(1);
61444 }
61445 cnt += sprintf(&page[cnt], "Reader Pipe: ");
61446 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
61447 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
61448 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61449 cnt += sprintf(&page[cnt], " %d",
61450 - atomic_read(&rcu_torture_wcount[i]));
61451 + atomic_read_unchecked(&rcu_torture_wcount[i]));
61452 }
61453 cnt += sprintf(&page[cnt], "\n");
61454 if (cur_ops->stats)
61455 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
61456
61457 if (cur_ops->cleanup)
61458 cur_ops->cleanup();
61459 - if (atomic_read(&n_rcu_torture_error))
61460 + if (atomic_read_unchecked(&n_rcu_torture_error))
61461 rcu_torture_print_module_parms("End of test: FAILURE");
61462 else
61463 rcu_torture_print_module_parms("End of test: SUCCESS");
61464 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
61465
61466 rcu_torture_current = NULL;
61467 rcu_torture_current_version = 0;
61468 - atomic_set(&n_rcu_torture_alloc, 0);
61469 - atomic_set(&n_rcu_torture_alloc_fail, 0);
61470 - atomic_set(&n_rcu_torture_free, 0);
61471 - atomic_set(&n_rcu_torture_mberror, 0);
61472 - atomic_set(&n_rcu_torture_error, 0);
61473 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
61474 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
61475 + atomic_set_unchecked(&n_rcu_torture_free, 0);
61476 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
61477 + atomic_set_unchecked(&n_rcu_torture_error, 0);
61478 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
61479 - atomic_set(&rcu_torture_wcount[i], 0);
61480 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
61481 for_each_possible_cpu(cpu) {
61482 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61483 per_cpu(rcu_torture_count, cpu)[i] = 0;
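
The hunks above convert the rcutorture statistics counters from atomic_t to atomic_unchecked_t, exempting them from PaX's reference-count overflow protection; wrapping is harmless for pure statistics. The type and its helpers are defined elsewhere in the patch and are not shown in this excerpt. As a rough, hedged sketch of the idea (a generic fallback, not the real implementation), the unchecked helpers simply forward to the ordinary atomic operations without any overflow saturation:

    /* illustrative sketch only, not taken from this patch */
    typedef struct {
            atomic_t a;
    } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            atomic_inc(&v->a);              /* no overflow detection or saturation */
    }

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
            return atomic_read(&v->a);
    }

    static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
    {
            atomic_set(&v->a, i);
    }
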
61484 diff -urNp linux-2.6.32.42/kernel/rcutree.c linux-2.6.32.42/kernel/rcutree.c
61485 --- linux-2.6.32.42/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
61486 +++ linux-2.6.32.42/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
61487 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
61488 /*
61489 * Do softirq processing for the current CPU.
61490 */
61491 -static void rcu_process_callbacks(struct softirq_action *unused)
61492 +static void rcu_process_callbacks(void)
61493 {
61494 /*
61495 * Memory references from any prior RCU read-side critical sections
61496 diff -urNp linux-2.6.32.42/kernel/rcutree_plugin.h linux-2.6.32.42/kernel/rcutree_plugin.h
61497 --- linux-2.6.32.42/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
61498 +++ linux-2.6.32.42/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
61499 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
61500 */
61501 void __rcu_read_lock(void)
61502 {
61503 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
61504 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
61505 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
61506 }
61507 EXPORT_SYMBOL_GPL(__rcu_read_lock);
61508 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
61509 struct task_struct *t = current;
61510
61511 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
61512 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
61513 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
61514 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
61515 rcu_read_unlock_special(t);
61516 }
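
The __rcu_read_lock()/__rcu_read_unlock() hunks above (like the timekeeping hunk later in this patch) switch to ACCESS_ONCE_RW exactly where the location is modified, leaving plain ACCESS_ONCE for read-only uses. The macros themselves live in the compiler headers, which this excerpt does not show; a hedged sketch of the split they imply:

    /* sketch of the assumed read-only vs read-write split, not quoted from the patch */
    #define ACCESS_ONCE(x)          (*(const volatile typeof(x) *)&(x))     /* loads only       */
    #define ACCESS_ONCE_RW(x)       (*(volatile typeof(x) *)&(x))           /* loads and stores */

With the const-qualified cast, accidentally writing through ACCESS_ONCE() becomes a compile error, which is why every genuine store site has to be annotated with the _RW form.
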
61517 diff -urNp linux-2.6.32.42/kernel/relay.c linux-2.6.32.42/kernel/relay.c
61518 --- linux-2.6.32.42/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
61519 +++ linux-2.6.32.42/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
61520 @@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
61521 unsigned int flags,
61522 int *nonpad_ret)
61523 {
61524 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
61525 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
61526 struct rchan_buf *rbuf = in->private_data;
61527 unsigned int subbuf_size = rbuf->chan->subbuf_size;
61528 uint64_t pos = (uint64_t) *ppos;
61529 @@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
61530 .ops = &relay_pipe_buf_ops,
61531 .spd_release = relay_page_release,
61532 };
61533 + ssize_t ret;
61534 +
61535 + pax_track_stack();
61536
61537 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
61538 return 0;
61539 diff -urNp linux-2.6.32.42/kernel/resource.c linux-2.6.32.42/kernel/resource.c
61540 --- linux-2.6.32.42/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
61541 +++ linux-2.6.32.42/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
61542 @@ -132,8 +132,18 @@ static const struct file_operations proc
61543
61544 static int __init ioresources_init(void)
61545 {
61546 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
61547 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61548 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
61549 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
61550 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61551 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
61552 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
61553 +#endif
61554 +#else
61555 proc_create("ioports", 0, NULL, &proc_ioports_operations);
61556 proc_create("iomem", 0, NULL, &proc_iomem_operations);
61557 +#endif
61558 return 0;
61559 }
61560 __initcall(ioresources_init);
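
The ioresources_init() change above only varies the mode passed to proc_create(): S_IRUSR (0400) restricts /proc/ioports and /proc/iomem to root, S_IRUSR | S_IRGRP (0440) additionally allows the configured proc group, and the unpatched call keeps the mainline default. Purely for illustration, the same selection could be factored into a hypothetical helper that is not present in the patch:

    /* hypothetical helper, shown only to make the mode selection explicit */
    static inline mode_t restricted_proc_mode(void)
    {
    #if defined(CONFIG_GRKERNSEC_PROC_USER)
            return S_IRUSR;                 /* 0400: root only                */
    #elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
            return S_IRUSR | S_IRGRP;       /* 0440: root plus the proc group */
    #else
            return 0;                       /* mainline default               */
    #endif
    }
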
61561 diff -urNp linux-2.6.32.42/kernel/rtmutex.c linux-2.6.32.42/kernel/rtmutex.c
61562 --- linux-2.6.32.42/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
61563 +++ linux-2.6.32.42/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
61564 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
61565 */
61566 spin_lock_irqsave(&pendowner->pi_lock, flags);
61567
61568 - WARN_ON(!pendowner->pi_blocked_on);
61569 + BUG_ON(!pendowner->pi_blocked_on);
61570 WARN_ON(pendowner->pi_blocked_on != waiter);
61571 WARN_ON(pendowner->pi_blocked_on->lock != lock);
61572
61573 diff -urNp linux-2.6.32.42/kernel/rtmutex-tester.c linux-2.6.32.42/kernel/rtmutex-tester.c
61574 --- linux-2.6.32.42/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
61575 +++ linux-2.6.32.42/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
61576 @@ -21,7 +21,7 @@
61577 #define MAX_RT_TEST_MUTEXES 8
61578
61579 static spinlock_t rttest_lock;
61580 -static atomic_t rttest_event;
61581 +static atomic_unchecked_t rttest_event;
61582
61583 struct test_thread_data {
61584 int opcode;
61585 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
61586
61587 case RTTEST_LOCKCONT:
61588 td->mutexes[td->opdata] = 1;
61589 - td->event = atomic_add_return(1, &rttest_event);
61590 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61591 return 0;
61592
61593 case RTTEST_RESET:
61594 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
61595 return 0;
61596
61597 case RTTEST_RESETEVENT:
61598 - atomic_set(&rttest_event, 0);
61599 + atomic_set_unchecked(&rttest_event, 0);
61600 return 0;
61601
61602 default:
61603 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
61604 return ret;
61605
61606 td->mutexes[id] = 1;
61607 - td->event = atomic_add_return(1, &rttest_event);
61608 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61609 rt_mutex_lock(&mutexes[id]);
61610 - td->event = atomic_add_return(1, &rttest_event);
61611 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61612 td->mutexes[id] = 4;
61613 return 0;
61614
61615 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
61616 return ret;
61617
61618 td->mutexes[id] = 1;
61619 - td->event = atomic_add_return(1, &rttest_event);
61620 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61621 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
61622 - td->event = atomic_add_return(1, &rttest_event);
61623 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61624 td->mutexes[id] = ret ? 0 : 4;
61625 return ret ? -EINTR : 0;
61626
61627 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
61628 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
61629 return ret;
61630
61631 - td->event = atomic_add_return(1, &rttest_event);
61632 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61633 rt_mutex_unlock(&mutexes[id]);
61634 - td->event = atomic_add_return(1, &rttest_event);
61635 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61636 td->mutexes[id] = 0;
61637 return 0;
61638
61639 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
61640 break;
61641
61642 td->mutexes[dat] = 2;
61643 - td->event = atomic_add_return(1, &rttest_event);
61644 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61645 break;
61646
61647 case RTTEST_LOCKBKL:
61648 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
61649 return;
61650
61651 td->mutexes[dat] = 3;
61652 - td->event = atomic_add_return(1, &rttest_event);
61653 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61654 break;
61655
61656 case RTTEST_LOCKNOWAIT:
61657 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
61658 return;
61659
61660 td->mutexes[dat] = 1;
61661 - td->event = atomic_add_return(1, &rttest_event);
61662 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61663 return;
61664
61665 case RTTEST_LOCKBKL:
61666 diff -urNp linux-2.6.32.42/kernel/sched.c linux-2.6.32.42/kernel/sched.c
61667 --- linux-2.6.32.42/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
61668 +++ linux-2.6.32.42/kernel/sched.c 2011-05-22 23:02:06.000000000 -0400
61669 @@ -5043,7 +5043,7 @@ out:
61670 * In CONFIG_NO_HZ case, the idle load balance owner will do the
61671 * rebalancing for all the cpus for whom scheduler ticks are stopped.
61672 */
61673 -static void run_rebalance_domains(struct softirq_action *h)
61674 +static void run_rebalance_domains(void)
61675 {
61676 int this_cpu = smp_processor_id();
61677 struct rq *this_rq = cpu_rq(this_cpu);
61678 @@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
61679 struct rq *rq;
61680 int cpu;
61681
61682 + pax_track_stack();
61683 +
61684 need_resched:
61685 preempt_disable();
61686 cpu = smp_processor_id();
61687 @@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
61688 * Look out! "owner" is an entirely speculative pointer
61689 * access and not reliable.
61690 */
61691 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
61692 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
61693 {
61694 unsigned int cpu;
61695 struct rq *rq;
61696 @@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lo
61697 * DEBUG_PAGEALLOC could have unmapped it if
61698 * the mutex owner just released it and exited.
61699 */
61700 - if (probe_kernel_address(&owner->cpu, cpu))
61701 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
61702 return 0;
61703 #else
61704 - cpu = owner->cpu;
61705 + cpu = task_thread_info(owner)->cpu;
61706 #endif
61707
61708 /*
61709 @@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lo
61710 /*
61711 * Is that owner really running on that cpu?
61712 */
61713 - if (task_thread_info(rq->curr) != owner || need_resched())
61714 + if (rq->curr != owner || need_resched())
61715 return 0;
61716
61717 cpu_relax();
61718 @@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p
61719 /* convert nice value [19,-20] to rlimit style value [1,40] */
61720 int nice_rlim = 20 - nice;
61721
61722 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
61723 +
61724 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
61725 capable(CAP_SYS_NICE));
61726 }
61727 @@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
61728 if (nice > 19)
61729 nice = 19;
61730
61731 - if (increment < 0 && !can_nice(current, nice))
61732 + if (increment < 0 && (!can_nice(current, nice) ||
61733 + gr_handle_chroot_nice()))
61734 return -EPERM;
61735
61736 retval = security_task_setnice(current, nice);
61737 @@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int
61738 long power;
61739 int weight;
61740
61741 - WARN_ON(!sd || !sd->groups);
61742 + BUG_ON(!sd || !sd->groups);
61743
61744 if (cpu != group_first_cpu(sd->groups))
61745 return;
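
The can_nice() hunk above reports the request to gr_learn_resource() using the same conversion mainline already applies for RLIMIT_NICE, noted in the context line: nice values 19..-20 map onto rlimit-style values 1..40 as 20 - nice. A standalone check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
            /* nice 19 (weakest) -> rlimit 1, nice -20 (strongest) -> rlimit 40 */
            for (int nice = 19; nice >= -20; nice--)
                    printf("nice %3d -> rlimit style %2d\n", nice, 20 - nice);
            return 0;
    }
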
61746 diff -urNp linux-2.6.32.42/kernel/signal.c linux-2.6.32.42/kernel/signal.c
61747 --- linux-2.6.32.42/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
61748 +++ linux-2.6.32.42/kernel/signal.c 2011-05-22 23:02:06.000000000 -0400
61749 @@ -41,12 +41,12 @@
61750
61751 static struct kmem_cache *sigqueue_cachep;
61752
61753 -static void __user *sig_handler(struct task_struct *t, int sig)
61754 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
61755 {
61756 return t->sighand->action[sig - 1].sa.sa_handler;
61757 }
61758
61759 -static int sig_handler_ignored(void __user *handler, int sig)
61760 +static int sig_handler_ignored(__sighandler_t handler, int sig)
61761 {
61762 /* Is it explicitly or implicitly ignored? */
61763 return handler == SIG_IGN ||
61764 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
61765 static int sig_task_ignored(struct task_struct *t, int sig,
61766 int from_ancestor_ns)
61767 {
61768 - void __user *handler;
61769 + __sighandler_t handler;
61770
61771 handler = sig_handler(t, sig);
61772
61773 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
61774 */
61775 user = get_uid(__task_cred(t)->user);
61776 atomic_inc(&user->sigpending);
61777 +
61778 + if (!override_rlimit)
61779 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
61780 if (override_rlimit ||
61781 atomic_read(&user->sigpending) <=
61782 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
61783 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
61784
61785 int unhandled_signal(struct task_struct *tsk, int sig)
61786 {
61787 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
61788 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
61789 if (is_global_init(tsk))
61790 return 1;
61791 if (handler != SIG_IGN && handler != SIG_DFL)
61792 @@ -627,6 +630,9 @@ static int check_kill_permission(int sig
61793 }
61794 }
61795
61796 + if (gr_handle_signal(t, sig))
61797 + return -EPERM;
61798 +
61799 return security_task_kill(t, info, sig, 0);
61800 }
61801
61802 @@ -968,7 +974,7 @@ __group_send_sig_info(int sig, struct si
61803 return send_signal(sig, info, p, 1);
61804 }
61805
61806 -static int
61807 +int
61808 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
61809 {
61810 return send_signal(sig, info, t, 0);
61811 @@ -1005,6 +1011,7 @@ force_sig_info(int sig, struct siginfo *
61812 unsigned long int flags;
61813 int ret, blocked, ignored;
61814 struct k_sigaction *action;
61815 + int is_unhandled = 0;
61816
61817 spin_lock_irqsave(&t->sighand->siglock, flags);
61818 action = &t->sighand->action[sig-1];
61819 @@ -1019,9 +1026,18 @@ force_sig_info(int sig, struct siginfo *
61820 }
61821 if (action->sa.sa_handler == SIG_DFL)
61822 t->signal->flags &= ~SIGNAL_UNKILLABLE;
61823 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
61824 + is_unhandled = 1;
61825 ret = specific_send_sig_info(sig, info, t);
61826 spin_unlock_irqrestore(&t->sighand->siglock, flags);
61827
61828 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
61829 + normal operation */
61830 + if (is_unhandled) {
61831 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
61832 + gr_handle_crash(t, sig);
61833 + }
61834 +
61835 return ret;
61836 }
61837
61838 @@ -1081,8 +1097,11 @@ int group_send_sig_info(int sig, struct
61839 {
61840 int ret = check_kill_permission(sig, info, p);
61841
61842 - if (!ret && sig)
61843 + if (!ret && sig) {
61844 ret = do_send_sig_info(sig, info, p, true);
61845 + if (!ret)
61846 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
61847 + }
61848
61849 return ret;
61850 }
61851 @@ -1644,6 +1663,8 @@ void ptrace_notify(int exit_code)
61852 {
61853 siginfo_t info;
61854
61855 + pax_track_stack();
61856 +
61857 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
61858
61859 memset(&info, 0, sizeof info);
61860 diff -urNp linux-2.6.32.42/kernel/smp.c linux-2.6.32.42/kernel/smp.c
61861 --- linux-2.6.32.42/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
61862 +++ linux-2.6.32.42/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
61863 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
61864 }
61865 EXPORT_SYMBOL(smp_call_function);
61866
61867 -void ipi_call_lock(void)
61868 +void ipi_call_lock(void) __acquires(call_function.lock)
61869 {
61870 spin_lock(&call_function.lock);
61871 }
61872
61873 -void ipi_call_unlock(void)
61874 +void ipi_call_unlock(void) __releases(call_function.lock)
61875 {
61876 spin_unlock(&call_function.lock);
61877 }
61878
61879 -void ipi_call_lock_irq(void)
61880 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
61881 {
61882 spin_lock_irq(&call_function.lock);
61883 }
61884
61885 -void ipi_call_unlock_irq(void)
61886 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
61887 {
61888 spin_unlock_irq(&call_function.lock);
61889 }
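
The ipi_call_lock()/ipi_call_unlock() hunks above change nothing at runtime; they only add sparse lock-context annotations so static analysis can see that these helpers take and release call_function.lock. Assuming the usual compiler.h pattern (not quoted from this patch), the annotations expand roughly as:

    #ifdef __CHECKER__
    # define __acquires(x)  __attribute__((context(x, 0, 1)))      /* lock held on return    */
    # define __releases(x)  __attribute__((context(x, 1, 0)))      /* lock dropped on return */
    #else
    # define __acquires(x)                                          /* no effect for gcc      */
    # define __releases(x)
    #endif
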
61890 diff -urNp linux-2.6.32.42/kernel/softirq.c linux-2.6.32.42/kernel/softirq.c
61891 --- linux-2.6.32.42/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
61892 +++ linux-2.6.32.42/kernel/softirq.c 2011-04-17 15:56:46.000000000 -0400
61893 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
61894
61895 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
61896
61897 -char *softirq_to_name[NR_SOFTIRQS] = {
61898 +const char * const softirq_to_name[NR_SOFTIRQS] = {
61899 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
61900 "TASKLET", "SCHED", "HRTIMER", "RCU"
61901 };
61902 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
61903
61904 asmlinkage void __do_softirq(void)
61905 {
61906 - struct softirq_action *h;
61907 + const struct softirq_action *h;
61908 __u32 pending;
61909 int max_restart = MAX_SOFTIRQ_RESTART;
61910 int cpu;
61911 @@ -233,7 +233,7 @@ restart:
61912 kstat_incr_softirqs_this_cpu(h - softirq_vec);
61913
61914 trace_softirq_entry(h, softirq_vec);
61915 - h->action(h);
61916 + h->action();
61917 trace_softirq_exit(h, softirq_vec);
61918 if (unlikely(prev_count != preempt_count())) {
61919 printk(KERN_ERR "huh, entered softirq %td %s %p"
61920 @@ -363,7 +363,7 @@ void raise_softirq(unsigned int nr)
61921 local_irq_restore(flags);
61922 }
61923
61924 -void open_softirq(int nr, void (*action)(struct softirq_action *))
61925 +void open_softirq(int nr, void (*action)(void))
61926 {
61927 softirq_vec[nr].action = action;
61928 }
61929 @@ -419,7 +419,7 @@ void __tasklet_hi_schedule_first(struct
61930
61931 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
61932
61933 -static void tasklet_action(struct softirq_action *a)
61934 +static void tasklet_action(void)
61935 {
61936 struct tasklet_struct *list;
61937
61938 @@ -454,7 +454,7 @@ static void tasklet_action(struct softir
61939 }
61940 }
61941
61942 -static void tasklet_hi_action(struct softirq_action *a)
61943 +static void tasklet_hi_action(void)
61944 {
61945 struct tasklet_struct *list;
61946
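
The softirq hunks above (together with the scheduler, timer and RCU handlers changed elsewhere in this patch) drop the unused struct softirq_action * parameter, so a registered handler becomes a plain void function and h->action() is invoked with no argument. The matching member change lives in include/linux/interrupt.h, which this excerpt does not show; as implied by the hunks, it would look roughly like:

    struct softirq_action {
            void (*action)(void);   /* was: void (*action)(struct softirq_action *) */
    };
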
61947 diff -urNp linux-2.6.32.42/kernel/sys.c linux-2.6.32.42/kernel/sys.c
61948 --- linux-2.6.32.42/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
61949 +++ linux-2.6.32.42/kernel/sys.c 2011-04-17 15:56:46.000000000 -0400
61950 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
61951 error = -EACCES;
61952 goto out;
61953 }
61954 +
61955 + if (gr_handle_chroot_setpriority(p, niceval)) {
61956 + error = -EACCES;
61957 + goto out;
61958 + }
61959 +
61960 no_nice = security_task_setnice(p, niceval);
61961 if (no_nice) {
61962 error = no_nice;
61963 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
61964 !(user = find_user(who)))
61965 goto out_unlock; /* No processes for this user */
61966
61967 - do_each_thread(g, p)
61968 + do_each_thread(g, p) {
61969 if (__task_cred(p)->uid == who)
61970 error = set_one_prio(p, niceval, error);
61971 - while_each_thread(g, p);
61972 + } while_each_thread(g, p);
61973 if (who != cred->uid)
61974 free_uid(user); /* For find_user() */
61975 break;
61976 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
61977 !(user = find_user(who)))
61978 goto out_unlock; /* No processes for this user */
61979
61980 - do_each_thread(g, p)
61981 + do_each_thread(g, p) {
61982 if (__task_cred(p)->uid == who) {
61983 niceval = 20 - task_nice(p);
61984 if (niceval > retval)
61985 retval = niceval;
61986 }
61987 - while_each_thread(g, p);
61988 + } while_each_thread(g, p);
61989 if (who != cred->uid)
61990 free_uid(user); /* for find_user() */
61991 break;
61992 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
61993 goto error;
61994 }
61995
61996 + if (gr_check_group_change(new->gid, new->egid, -1))
61997 + goto error;
61998 +
61999 if (rgid != (gid_t) -1 ||
62000 (egid != (gid_t) -1 && egid != old->gid))
62001 new->sgid = new->egid;
62002 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
62003 goto error;
62004
62005 retval = -EPERM;
62006 +
62007 + if (gr_check_group_change(gid, gid, gid))
62008 + goto error;
62009 +
62010 if (capable(CAP_SETGID))
62011 new->gid = new->egid = new->sgid = new->fsgid = gid;
62012 else if (gid == old->gid || gid == old->sgid)
62013 @@ -627,6 +640,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
62014 goto error;
62015 }
62016
62017 + if (gr_check_user_change(new->uid, new->euid, -1))
62018 + goto error;
62019 +
62020 if (new->uid != old->uid) {
62021 retval = set_user(new);
62022 if (retval < 0)
62023 @@ -675,6 +691,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
62024 goto error;
62025
62026 retval = -EPERM;
62027 +
62028 + if (gr_check_crash_uid(uid))
62029 + goto error;
62030 + if (gr_check_user_change(uid, uid, uid))
62031 + goto error;
62032 +
62033 if (capable(CAP_SETUID)) {
62034 new->suid = new->uid = uid;
62035 if (uid != old->uid) {
62036 @@ -732,6 +754,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
62037 goto error;
62038 }
62039
62040 + if (gr_check_user_change(ruid, euid, -1))
62041 + goto error;
62042 +
62043 if (ruid != (uid_t) -1) {
62044 new->uid = ruid;
62045 if (ruid != old->uid) {
62046 @@ -800,6 +825,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
62047 goto error;
62048 }
62049
62050 + if (gr_check_group_change(rgid, egid, -1))
62051 + goto error;
62052 +
62053 if (rgid != (gid_t) -1)
62054 new->gid = rgid;
62055 if (egid != (gid_t) -1)
62056 @@ -849,6 +877,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62057 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
62058 goto error;
62059
62060 + if (gr_check_user_change(-1, -1, uid))
62061 + goto error;
62062 +
62063 if (uid == old->uid || uid == old->euid ||
62064 uid == old->suid || uid == old->fsuid ||
62065 capable(CAP_SETUID)) {
62066 @@ -889,6 +920,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
62067 if (gid == old->gid || gid == old->egid ||
62068 gid == old->sgid || gid == old->fsgid ||
62069 capable(CAP_SETGID)) {
62070 + if (gr_check_group_change(-1, -1, gid))
62071 + goto error;
62072 +
62073 if (gid != old_fsgid) {
62074 new->fsgid = gid;
62075 goto change_okay;
62076 @@ -1454,7 +1488,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
62077 error = get_dumpable(me->mm);
62078 break;
62079 case PR_SET_DUMPABLE:
62080 - if (arg2 < 0 || arg2 > 1) {
62081 + if (arg2 > 1) {
62082 error = -EINVAL;
62083 break;
62084 }
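
The PR_SET_DUMPABLE hunk above drops the arg2 < 0 half of the range check: prctl()'s arg2 is an unsigned long, so that comparison is always false and arg2 > 1 already rejects every out-of-range value, including a caller-supplied -1. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
            unsigned long arg2 = (unsigned long)-1;     /* what passing -1 looks like in the kernel */

            printf("arg2 < 0 -> %d (always 0 for an unsigned type)\n", arg2 < 0);
            printf("arg2 > 1 -> %d (still rejected)\n", arg2 > 1);
            return 0;
    }
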
62085 diff -urNp linux-2.6.32.42/kernel/sysctl.c linux-2.6.32.42/kernel/sysctl.c
62086 --- linux-2.6.32.42/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
62087 +++ linux-2.6.32.42/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
62088 @@ -63,6 +63,13 @@
62089 static int deprecated_sysctl_warning(struct __sysctl_args *args);
62090
62091 #if defined(CONFIG_SYSCTL)
62092 +#include <linux/grsecurity.h>
62093 +#include <linux/grinternal.h>
62094 +
62095 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
62096 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
62097 + const int op);
62098 +extern int gr_handle_chroot_sysctl(const int op);
62099
62100 /* External variables not in a header file. */
62101 extern int C_A_D;
62102 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
62103 static int proc_taint(struct ctl_table *table, int write,
62104 void __user *buffer, size_t *lenp, loff_t *ppos);
62105 #endif
62106 +extern ctl_table grsecurity_table[];
62107
62108 static struct ctl_table root_table[];
62109 static struct ctl_table_root sysctl_table_root;
62110 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
62111 int sysctl_legacy_va_layout;
62112 #endif
62113
62114 +#ifdef CONFIG_PAX_SOFTMODE
62115 +static ctl_table pax_table[] = {
62116 + {
62117 + .ctl_name = CTL_UNNUMBERED,
62118 + .procname = "softmode",
62119 + .data = &pax_softmode,
62120 + .maxlen = sizeof(unsigned int),
62121 + .mode = 0600,
62122 + .proc_handler = &proc_dointvec,
62123 + },
62124 +
62125 + { .ctl_name = 0 }
62126 +};
62127 +#endif
62128 +
62129 extern int prove_locking;
62130 extern int lock_stat;
62131
62132 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
62133 #endif
62134
62135 static struct ctl_table kern_table[] = {
62136 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
62137 + {
62138 + .ctl_name = CTL_UNNUMBERED,
62139 + .procname = "grsecurity",
62140 + .mode = 0500,
62141 + .child = grsecurity_table,
62142 + },
62143 +#endif
62144 +
62145 +#ifdef CONFIG_PAX_SOFTMODE
62146 + {
62147 + .ctl_name = CTL_UNNUMBERED,
62148 + .procname = "pax",
62149 + .mode = 0500,
62150 + .child = pax_table,
62151 + },
62152 +#endif
62153 +
62154 {
62155 .ctl_name = CTL_UNNUMBERED,
62156 .procname = "sched_child_runs_first",
62157 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
62158 .data = &modprobe_path,
62159 .maxlen = KMOD_PATH_LEN,
62160 .mode = 0644,
62161 - .proc_handler = &proc_dostring,
62162 - .strategy = &sysctl_string,
62163 + .proc_handler = &proc_dostring_modpriv,
62164 + .strategy = &sysctl_string_modpriv,
62165 },
62166 {
62167 .ctl_name = CTL_UNNUMBERED,
62168 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
62169 .mode = 0644,
62170 .proc_handler = &proc_dointvec
62171 },
62172 + {
62173 + .procname = "heap_stack_gap",
62174 + .data = &sysctl_heap_stack_gap,
62175 + .maxlen = sizeof(sysctl_heap_stack_gap),
62176 + .mode = 0644,
62177 + .proc_handler = proc_doulongvec_minmax,
62178 + },
62179 #else
62180 {
62181 .ctl_name = CTL_UNNUMBERED,
62182 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
62183 return 0;
62184 }
62185
62186 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
62187 +
62188 static int parse_table(int __user *name, int nlen,
62189 void __user *oldval, size_t __user *oldlenp,
62190 void __user *newval, size_t newlen,
62191 @@ -1821,7 +1871,7 @@ repeat:
62192 if (n == table->ctl_name) {
62193 int error;
62194 if (table->child) {
62195 - if (sysctl_perm(root, table, MAY_EXEC))
62196 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
62197 return -EPERM;
62198 name++;
62199 nlen--;
62200 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
62201 int error;
62202 int mode;
62203
62204 + if (table->parent != NULL && table->parent->procname != NULL &&
62205 + table->procname != NULL &&
62206 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
62207 + return -EACCES;
62208 + if (gr_handle_chroot_sysctl(op))
62209 + return -EACCES;
62210 + error = gr_handle_sysctl(table, op);
62211 + if (error)
62212 + return error;
62213 +
62214 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62215 + if (error)
62216 + return error;
62217 +
62218 + if (root->permissions)
62219 + mode = root->permissions(root, current->nsproxy, table);
62220 + else
62221 + mode = table->mode;
62222 +
62223 + return test_perm(mode, op);
62224 +}
62225 +
62226 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
62227 +{
62228 + int error;
62229 + int mode;
62230 +
62231 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62232 if (error)
62233 return error;
62234 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
62235 buffer, lenp, ppos);
62236 }
62237
62238 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62239 + void __user *buffer, size_t *lenp, loff_t *ppos)
62240 +{
62241 + if (write && !capable(CAP_SYS_MODULE))
62242 + return -EPERM;
62243 +
62244 + return _proc_do_string(table->data, table->maxlen, write,
62245 + buffer, lenp, ppos);
62246 +}
62247 +
62248
62249 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
62250 int *valp,
62251 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
62252 vleft = table->maxlen / sizeof(unsigned long);
62253 left = *lenp;
62254
62255 - for (; left && vleft--; i++, min++, max++, first=0) {
62256 + for (; left && vleft--; i++, first=0) {
62257 if (write) {
62258 while (left) {
62259 char c;
62260 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
62261 return -ENOSYS;
62262 }
62263
62264 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62265 + void __user *buffer, size_t *lenp, loff_t *ppos)
62266 +{
62267 + return -ENOSYS;
62268 +}
62269 +
62270 int proc_dointvec(struct ctl_table *table, int write,
62271 void __user *buffer, size_t *lenp, loff_t *ppos)
62272 {
62273 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
62274 return 1;
62275 }
62276
62277 +int sysctl_string_modpriv(struct ctl_table *table,
62278 + void __user *oldval, size_t __user *oldlenp,
62279 + void __user *newval, size_t newlen)
62280 +{
62281 + if (newval && newlen && !capable(CAP_SYS_MODULE))
62282 + return -EPERM;
62283 +
62284 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
62285 +}
62286 +
62287 /*
62288 * This function makes sure that all of the integers in the vector
62289 * are between the minimum and maximum values given in the arrays
62290 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
62291 return -ENOSYS;
62292 }
62293
62294 +int sysctl_string_modpriv(struct ctl_table *table,
62295 + void __user *oldval, size_t __user *oldlenp,
62296 + void __user *newval, size_t newlen)
62297 +{
62298 + return -ENOSYS;
62299 +}
62300 +
62301 int sysctl_intvec(struct ctl_table *table,
62302 void __user *oldval, size_t __user *oldlenp,
62303 void __user *newval, size_t newlen)
62304 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
62305 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
62306 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
62307 EXPORT_SYMBOL(proc_dostring);
62308 +EXPORT_SYMBOL(proc_dostring_modpriv);
62309 EXPORT_SYMBOL(proc_doulongvec_minmax);
62310 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
62311 EXPORT_SYMBOL(register_sysctl_table);
62312 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
62313 EXPORT_SYMBOL(sysctl_jiffies);
62314 EXPORT_SYMBOL(sysctl_ms_jiffies);
62315 EXPORT_SYMBOL(sysctl_string);
62316 +EXPORT_SYMBOL(sysctl_string_modpriv);
62317 EXPORT_SYMBOL(sysctl_data);
62318 EXPORT_SYMBOL(unregister_sysctl_table);
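
Besides the grsecurity directory, the kern_table additions above register (under CONFIG_PAX_SOFTMODE) a pax/softmode entry with mode 0600 and the stock proc_dointvec handler, which places it under /proc/sys/kernel/. A hypothetical userspace probe, shown only to illustrate the resulting path and permissions:

    #include <stdio.h>

    int main(void)
    {
            /* path follows from kern_table -> "pax" -> "softmode" above */
            FILE *f = fopen("/proc/sys/kernel/pax/softmode", "r");
            int mode;

            if (!f) {
                    perror("pax softmode sysctl");      /* 0600: root only, or PAX_SOFTMODE not built */
                    return 1;
            }
            if (fscanf(f, "%d", &mode) == 1)
                    printf("PaX softmode: %d\n", mode);
            fclose(f);
            return 0;
    }
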
62319 diff -urNp linux-2.6.32.42/kernel/sysctl_check.c linux-2.6.32.42/kernel/sysctl_check.c
62320 --- linux-2.6.32.42/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
62321 +++ linux-2.6.32.42/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
62322 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
62323 } else {
62324 if ((table->strategy == sysctl_data) ||
62325 (table->strategy == sysctl_string) ||
62326 + (table->strategy == sysctl_string_modpriv) ||
62327 (table->strategy == sysctl_intvec) ||
62328 (table->strategy == sysctl_jiffies) ||
62329 (table->strategy == sysctl_ms_jiffies) ||
62330 (table->proc_handler == proc_dostring) ||
62331 + (table->proc_handler == proc_dostring_modpriv) ||
62332 (table->proc_handler == proc_dointvec) ||
62333 (table->proc_handler == proc_dointvec_minmax) ||
62334 (table->proc_handler == proc_dointvec_jiffies) ||
62335 diff -urNp linux-2.6.32.42/kernel/taskstats.c linux-2.6.32.42/kernel/taskstats.c
62336 --- linux-2.6.32.42/kernel/taskstats.c 2011-03-27 14:31:47.000000000 -0400
62337 +++ linux-2.6.32.42/kernel/taskstats.c 2011-04-17 15:56:46.000000000 -0400
62338 @@ -26,9 +26,12 @@
62339 #include <linux/cgroup.h>
62340 #include <linux/fs.h>
62341 #include <linux/file.h>
62342 +#include <linux/grsecurity.h>
62343 #include <net/genetlink.h>
62344 #include <asm/atomic.h>
62345
62346 +extern int gr_is_taskstats_denied(int pid);
62347 +
62348 /*
62349 * Maximum length of a cpumask that can be specified in
62350 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
62351 @@ -433,6 +436,9 @@ static int taskstats_user_cmd(struct sk_
62352 size_t size;
62353 cpumask_var_t mask;
62354
62355 + if (gr_is_taskstats_denied(current->pid))
62356 + return -EACCES;
62357 +
62358 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
62359 return -ENOMEM;
62360
62361 diff -urNp linux-2.6.32.42/kernel/time/tick-broadcast.c linux-2.6.32.42/kernel/time/tick-broadcast.c
62362 --- linux-2.6.32.42/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
62363 +++ linux-2.6.32.42/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
62364 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
62365 * then clear the broadcast bit.
62366 */
62367 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
62368 - int cpu = smp_processor_id();
62369 + cpu = smp_processor_id();
62370
62371 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
62372 tick_broadcast_clear_oneshot(cpu);
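
The tick_device_uses_broadcast() hunk above deletes the int from the inner declaration, so smp_processor_id() is now assigned to the cpu already in scope instead of to a shadowing local that disappears at the end of the block. The pattern in isolation:

    #include <stdio.h>

    int main(void)
    {
            int cpu = 0;                    /* the cpu the caller handed in */

            if (1) {
                    int cpu = 3;            /* shadows the outer cpu; this is the removed pattern */
                    printf("inside the block: %d\n", cpu);
            }
            printf("after the block:  %d\n", cpu);      /* still 0: the assignment never escaped */
            return 0;
    }
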
62373 diff -urNp linux-2.6.32.42/kernel/time/timekeeping.c linux-2.6.32.42/kernel/time/timekeeping.c
62374 --- linux-2.6.32.42/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
62375 +++ linux-2.6.32.42/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
62376 @@ -14,6 +14,7 @@
62377 #include <linux/init.h>
62378 #include <linux/mm.h>
62379 #include <linux/sched.h>
62380 +#include <linux/grsecurity.h>
62381 #include <linux/sysdev.h>
62382 #include <linux/clocksource.h>
62383 #include <linux/jiffies.h>
62384 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
62385 */
62386 struct timespec ts = xtime;
62387 timespec_add_ns(&ts, nsec);
62388 - ACCESS_ONCE(xtime_cache) = ts;
62389 + ACCESS_ONCE_RW(xtime_cache) = ts;
62390 }
62391
62392 /* must hold xtime_lock */
62393 @@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
62394 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
62395 return -EINVAL;
62396
62397 + gr_log_timechange();
62398 +
62399 write_seqlock_irqsave(&xtime_lock, flags);
62400
62401 timekeeping_forward_now();
62402 diff -urNp linux-2.6.32.42/kernel/time/timer_list.c linux-2.6.32.42/kernel/time/timer_list.c
62403 --- linux-2.6.32.42/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
62404 +++ linux-2.6.32.42/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
62405 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
62406
62407 static void print_name_offset(struct seq_file *m, void *sym)
62408 {
62409 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62410 + SEQ_printf(m, "<%p>", NULL);
62411 +#else
62412 char symname[KSYM_NAME_LEN];
62413
62414 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
62415 SEQ_printf(m, "<%p>", sym);
62416 else
62417 SEQ_printf(m, "%s", symname);
62418 +#endif
62419 }
62420
62421 static void
62422 @@ -112,7 +116,11 @@ next_one:
62423 static void
62424 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
62425 {
62426 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62427 + SEQ_printf(m, " .base: %p\n", NULL);
62428 +#else
62429 SEQ_printf(m, " .base: %p\n", base);
62430 +#endif
62431 SEQ_printf(m, " .index: %d\n",
62432 base->index);
62433 SEQ_printf(m, " .resolution: %Lu nsecs\n",
62434 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
62435 {
62436 struct proc_dir_entry *pe;
62437
62438 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62439 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
62440 +#else
62441 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
62442 +#endif
62443 if (!pe)
62444 return -ENOMEM;
62445 return 0;
62446 diff -urNp linux-2.6.32.42/kernel/time/timer_stats.c linux-2.6.32.42/kernel/time/timer_stats.c
62447 --- linux-2.6.32.42/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
62448 +++ linux-2.6.32.42/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
62449 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
62450 static unsigned long nr_entries;
62451 static struct entry entries[MAX_ENTRIES];
62452
62453 -static atomic_t overflow_count;
62454 +static atomic_unchecked_t overflow_count;
62455
62456 /*
62457 * The entries are in a hash-table, for fast lookup:
62458 @@ -140,7 +140,7 @@ static void reset_entries(void)
62459 nr_entries = 0;
62460 memset(entries, 0, sizeof(entries));
62461 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
62462 - atomic_set(&overflow_count, 0);
62463 + atomic_set_unchecked(&overflow_count, 0);
62464 }
62465
62466 static struct entry *alloc_entry(void)
62467 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
62468 if (likely(entry))
62469 entry->count++;
62470 else
62471 - atomic_inc(&overflow_count);
62472 + atomic_inc_unchecked(&overflow_count);
62473
62474 out_unlock:
62475 spin_unlock_irqrestore(lock, flags);
62476 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
62477
62478 static void print_name_offset(struct seq_file *m, unsigned long addr)
62479 {
62480 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62481 + seq_printf(m, "<%p>", NULL);
62482 +#else
62483 char symname[KSYM_NAME_LEN];
62484
62485 if (lookup_symbol_name(addr, symname) < 0)
62486 seq_printf(m, "<%p>", (void *)addr);
62487 else
62488 seq_printf(m, "%s", symname);
62489 +#endif
62490 }
62491
62492 static int tstats_show(struct seq_file *m, void *v)
62493 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
62494
62495 seq_puts(m, "Timer Stats Version: v0.2\n");
62496 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
62497 - if (atomic_read(&overflow_count))
62498 + if (atomic_read_unchecked(&overflow_count))
62499 seq_printf(m, "Overflow: %d entries\n",
62500 - atomic_read(&overflow_count));
62501 + atomic_read_unchecked(&overflow_count));
62502
62503 for (i = 0; i < nr_entries; i++) {
62504 entry = entries + i;
62505 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
62506 {
62507 struct proc_dir_entry *pe;
62508
62509 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62510 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
62511 +#else
62512 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
62513 +#endif
62514 if (!pe)
62515 return -ENOMEM;
62516 return 0;
62517 diff -urNp linux-2.6.32.42/kernel/time.c linux-2.6.32.42/kernel/time.c
62518 --- linux-2.6.32.42/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
62519 +++ linux-2.6.32.42/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
62520 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
62521 return error;
62522
62523 if (tz) {
62524 + /* we log in do_settimeofday called below, so don't log twice
62525 + */
62526 + if (!tv)
62527 + gr_log_timechange();
62528 +
62529 /* SMP safe, global irq locking makes it work. */
62530 sys_tz = *tz;
62531 update_vsyscall_tz();
62532 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
62533 * Avoid unnecessary multiplications/divisions in the
62534 * two most common HZ cases:
62535 */
62536 -unsigned int inline jiffies_to_msecs(const unsigned long j)
62537 +inline unsigned int jiffies_to_msecs(const unsigned long j)
62538 {
62539 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
62540 return (MSEC_PER_SEC / HZ) * j;
62541 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
62542 }
62543 EXPORT_SYMBOL(jiffies_to_msecs);
62544
62545 -unsigned int inline jiffies_to_usecs(const unsigned long j)
62546 +inline unsigned int jiffies_to_usecs(const unsigned long j)
62547 {
62548 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
62549 return (USEC_PER_SEC / HZ) * j;
62550 diff -urNp linux-2.6.32.42/kernel/timer.c linux-2.6.32.42/kernel/timer.c
62551 --- linux-2.6.32.42/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
62552 +++ linux-2.6.32.42/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
62553 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
62554 /*
62555 * This function runs timers and the timer-tq in bottom half context.
62556 */
62557 -static void run_timer_softirq(struct softirq_action *h)
62558 +static void run_timer_softirq(void)
62559 {
62560 struct tvec_base *base = __get_cpu_var(tvec_bases);
62561
62562 diff -urNp linux-2.6.32.42/kernel/trace/blktrace.c linux-2.6.32.42/kernel/trace/blktrace.c
62563 --- linux-2.6.32.42/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
62564 +++ linux-2.6.32.42/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
62565 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
62566 struct blk_trace *bt = filp->private_data;
62567 char buf[16];
62568
62569 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
62570 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
62571
62572 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
62573 }
62574 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
62575 return 1;
62576
62577 bt = buf->chan->private_data;
62578 - atomic_inc(&bt->dropped);
62579 + atomic_inc_unchecked(&bt->dropped);
62580 return 0;
62581 }
62582
62583 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
62584
62585 bt->dir = dir;
62586 bt->dev = dev;
62587 - atomic_set(&bt->dropped, 0);
62588 + atomic_set_unchecked(&bt->dropped, 0);
62589
62590 ret = -EIO;
62591 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
62592 diff -urNp linux-2.6.32.42/kernel/trace/ftrace.c linux-2.6.32.42/kernel/trace/ftrace.c
62593 --- linux-2.6.32.42/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
62594 +++ linux-2.6.32.42/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
62595 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
62596
62597 ip = rec->ip;
62598
62599 + ret = ftrace_arch_code_modify_prepare();
62600 + FTRACE_WARN_ON(ret);
62601 + if (ret)
62602 + return 0;
62603 +
62604 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
62605 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
62606 if (ret) {
62607 ftrace_bug(ret, ip);
62608 rec->flags |= FTRACE_FL_FAILED;
62609 - return 0;
62610 }
62611 - return 1;
62612 + return ret ? 0 : 1;
62613 }
62614
62615 /*
62616 diff -urNp linux-2.6.32.42/kernel/trace/ring_buffer.c linux-2.6.32.42/kernel/trace/ring_buffer.c
62617 --- linux-2.6.32.42/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
62618 +++ linux-2.6.32.42/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
62619 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
62620 * the reader page). But if the next page is a header page,
62621 * its flags will be non zero.
62622 */
62623 -static int inline
62624 +static inline int
62625 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
62626 struct buffer_page *page, struct list_head *list)
62627 {
62628 diff -urNp linux-2.6.32.42/kernel/trace/trace.c linux-2.6.32.42/kernel/trace/trace.c
62629 --- linux-2.6.32.42/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
62630 +++ linux-2.6.32.42/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
62631 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
62632 size_t rem;
62633 unsigned int i;
62634
62635 + pax_track_stack();
62636 +
62637 /* copy the tracer to avoid using a global lock all around */
62638 mutex_lock(&trace_types_lock);
62639 if (unlikely(old_tracer != current_trace && current_trace)) {
62640 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
62641 int entries, size, i;
62642 size_t ret;
62643
62644 + pax_track_stack();
62645 +
62646 if (*ppos & (PAGE_SIZE - 1)) {
62647 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
62648 return -EINVAL;
62649 @@ -3816,10 +3820,9 @@ static const struct file_operations trac
62650 };
62651 #endif
62652
62653 -static struct dentry *d_tracer;
62654 -
62655 struct dentry *tracing_init_dentry(void)
62656 {
62657 + static struct dentry *d_tracer;
62658 static int once;
62659
62660 if (d_tracer)
62661 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
62662 return d_tracer;
62663 }
62664
62665 -static struct dentry *d_percpu;
62666 -
62667 struct dentry *tracing_dentry_percpu(void)
62668 {
62669 + static struct dentry *d_percpu;
62670 static int once;
62671 struct dentry *d_tracer;
62672
62673 diff -urNp linux-2.6.32.42/kernel/trace/trace_events.c linux-2.6.32.42/kernel/trace/trace_events.c
62674 --- linux-2.6.32.42/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
62675 +++ linux-2.6.32.42/kernel/trace/trace_events.c 2011-04-17 15:56:46.000000000 -0400
62676 @@ -951,6 +951,8 @@ static LIST_HEAD(ftrace_module_file_list
62677 * Modules must own their file_operations to keep up with
62678 * reference counting.
62679 */
62680 +
62681 +/* cannot be const */
62682 struct ftrace_module_file_ops {
62683 struct list_head list;
62684 struct module *mod;
62685 diff -urNp linux-2.6.32.42/kernel/trace/trace_mmiotrace.c linux-2.6.32.42/kernel/trace/trace_mmiotrace.c
62686 --- linux-2.6.32.42/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
62687 +++ linux-2.6.32.42/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
62688 @@ -23,7 +23,7 @@ struct header_iter {
62689 static struct trace_array *mmio_trace_array;
62690 static bool overrun_detected;
62691 static unsigned long prev_overruns;
62692 -static atomic_t dropped_count;
62693 +static atomic_unchecked_t dropped_count;
62694
62695 static void mmio_reset_data(struct trace_array *tr)
62696 {
62697 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
62698
62699 static unsigned long count_overruns(struct trace_iterator *iter)
62700 {
62701 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
62702 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
62703 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
62704
62705 if (over > prev_overruns)
62706 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
62707 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
62708 sizeof(*entry), 0, pc);
62709 if (!event) {
62710 - atomic_inc(&dropped_count);
62711 + atomic_inc_unchecked(&dropped_count);
62712 return;
62713 }
62714 entry = ring_buffer_event_data(event);
62715 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
62716 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
62717 sizeof(*entry), 0, pc);
62718 if (!event) {
62719 - atomic_inc(&dropped_count);
62720 + atomic_inc_unchecked(&dropped_count);
62721 return;
62722 }
62723 entry = ring_buffer_event_data(event);
62724 diff -urNp linux-2.6.32.42/kernel/trace/trace_output.c linux-2.6.32.42/kernel/trace/trace_output.c
62725 --- linux-2.6.32.42/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
62726 +++ linux-2.6.32.42/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
62727 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
62728 return 0;
62729 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
62730 if (!IS_ERR(p)) {
62731 - p = mangle_path(s->buffer + s->len, p, "\n");
62732 + p = mangle_path(s->buffer + s->len, p, "\n\\");
62733 if (p) {
62734 s->len = p - s->buffer;
62735 return 1;
62736 diff -urNp linux-2.6.32.42/kernel/trace/trace_stack.c linux-2.6.32.42/kernel/trace/trace_stack.c
62737 --- linux-2.6.32.42/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
62738 +++ linux-2.6.32.42/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
62739 @@ -50,7 +50,7 @@ static inline void check_stack(void)
62740 return;
62741
62742 /* we do not handle interrupt stacks yet */
62743 - if (!object_is_on_stack(&this_size))
62744 + if (!object_starts_on_stack(&this_size))
62745 return;
62746
62747 local_irq_save(flags);
62748 diff -urNp linux-2.6.32.42/kernel/trace/trace_workqueue.c linux-2.6.32.42/kernel/trace/trace_workqueue.c
62749 --- linux-2.6.32.42/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
62750 +++ linux-2.6.32.42/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
62751 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
62752 int cpu;
62753 pid_t pid;
62754 /* Can be inserted from interrupt or user context, need to be atomic */
62755 - atomic_t inserted;
62756 + atomic_unchecked_t inserted;
62757 /*
62758 * Don't need to be atomic, works are serialized in a single workqueue thread
62759 * on a single CPU.
62760 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
62761 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
62762 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
62763 if (node->pid == wq_thread->pid) {
62764 - atomic_inc(&node->inserted);
62765 + atomic_inc_unchecked(&node->inserted);
62766 goto found;
62767 }
62768 }
62769 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
62770 tsk = get_pid_task(pid, PIDTYPE_PID);
62771 if (tsk) {
62772 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
62773 - atomic_read(&cws->inserted), cws->executed,
62774 + atomic_read_unchecked(&cws->inserted), cws->executed,
62775 tsk->comm);
62776 put_task_struct(tsk);
62777 }
62778 diff -urNp linux-2.6.32.42/kernel/user.c linux-2.6.32.42/kernel/user.c
62779 --- linux-2.6.32.42/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
62780 +++ linux-2.6.32.42/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
62781 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
62782 spin_lock_irq(&uidhash_lock);
62783 up = uid_hash_find(uid, hashent);
62784 if (up) {
62785 + put_user_ns(ns);
62786 key_put(new->uid_keyring);
62787 key_put(new->session_keyring);
62788 kmem_cache_free(uid_cachep, new);
62789 diff -urNp linux-2.6.32.42/lib/bug.c linux-2.6.32.42/lib/bug.c
62790 --- linux-2.6.32.42/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
62791 +++ linux-2.6.32.42/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
62792 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
62793 return BUG_TRAP_TYPE_NONE;
62794
62795 bug = find_bug(bugaddr);
62796 + if (!bug)
62797 + return BUG_TRAP_TYPE_NONE;
62798
62799 printk(KERN_EMERG "------------[ cut here ]------------\n");
62800
62801 diff -urNp linux-2.6.32.42/lib/debugobjects.c linux-2.6.32.42/lib/debugobjects.c
62802 --- linux-2.6.32.42/lib/debugobjects.c 2011-03-27 14:31:47.000000000 -0400
62803 +++ linux-2.6.32.42/lib/debugobjects.c 2011-04-17 15:56:46.000000000 -0400
62804 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
62805 if (limit > 4)
62806 return;
62807
62808 - is_on_stack = object_is_on_stack(addr);
62809 + is_on_stack = object_starts_on_stack(addr);
62810 if (is_on_stack == onstack)
62811 return;
62812
62813 diff -urNp linux-2.6.32.42/lib/dma-debug.c linux-2.6.32.42/lib/dma-debug.c
62814 --- linux-2.6.32.42/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
62815 +++ linux-2.6.32.42/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
62816 @@ -861,7 +861,7 @@ out:
62817
62818 static void check_for_stack(struct device *dev, void *addr)
62819 {
62820 - if (object_is_on_stack(addr))
62821 + if (object_starts_on_stack(addr))
62822 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
62823 "stack [addr=%p]\n", addr);
62824 }
62825 diff -urNp linux-2.6.32.42/lib/idr.c linux-2.6.32.42/lib/idr.c
62826 --- linux-2.6.32.42/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
62827 +++ linux-2.6.32.42/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
62828 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
62829 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
62830
62831 /* if already at the top layer, we need to grow */
62832 - if (id >= 1 << (idp->layers * IDR_BITS)) {
62833 + if (id >= (1 << (idp->layers * IDR_BITS))) {
62834 *starting_id = id;
62835 return IDR_NEED_TO_GROW;
62836 }
62837 diff -urNp linux-2.6.32.42/lib/inflate.c linux-2.6.32.42/lib/inflate.c
62838 --- linux-2.6.32.42/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
62839 +++ linux-2.6.32.42/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
62840 @@ -266,7 +266,7 @@ static void free(void *where)
62841 malloc_ptr = free_mem_ptr;
62842 }
62843 #else
62844 -#define malloc(a) kmalloc(a, GFP_KERNEL)
62845 +#define malloc(a) kmalloc((a), GFP_KERNEL)
62846 #define free(a) kfree(a)
62847 #endif
62848
62849 diff -urNp linux-2.6.32.42/lib/Kconfig.debug linux-2.6.32.42/lib/Kconfig.debug
62850 --- linux-2.6.32.42/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
62851 +++ linux-2.6.32.42/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
62852 @@ -905,7 +905,7 @@ config LATENCYTOP
62853 select STACKTRACE
62854 select SCHEDSTATS
62855 select SCHED_DEBUG
62856 - depends on HAVE_LATENCYTOP_SUPPORT
62857 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
62858 help
62859 Enable this option if you want to use the LatencyTOP tool
62860 to find out which userspace is blocking on what kernel operations.
62861 diff -urNp linux-2.6.32.42/lib/kobject.c linux-2.6.32.42/lib/kobject.c
62862 --- linux-2.6.32.42/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
62863 +++ linux-2.6.32.42/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
62864 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
62865 return ret;
62866 }
62867
62868 -struct sysfs_ops kobj_sysfs_ops = {
62869 +const struct sysfs_ops kobj_sysfs_ops = {
62870 .show = kobj_attr_show,
62871 .store = kobj_attr_store,
62872 };
62873 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
62874 * If the kset was not able to be created, NULL will be returned.
62875 */
62876 static struct kset *kset_create(const char *name,
62877 - struct kset_uevent_ops *uevent_ops,
62878 + const struct kset_uevent_ops *uevent_ops,
62879 struct kobject *parent_kobj)
62880 {
62881 struct kset *kset;
62882 @@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
62883 * If the kset was not able to be created, NULL will be returned.
62884 */
62885 struct kset *kset_create_and_add(const char *name,
62886 - struct kset_uevent_ops *uevent_ops,
62887 + const struct kset_uevent_ops *uevent_ops,
62888 struct kobject *parent_kobj)
62889 {
62890 struct kset *kset;
62891 diff -urNp linux-2.6.32.42/lib/kobject_uevent.c linux-2.6.32.42/lib/kobject_uevent.c
62892 --- linux-2.6.32.42/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
62893 +++ linux-2.6.32.42/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
62894 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
62895 const char *subsystem;
62896 struct kobject *top_kobj;
62897 struct kset *kset;
62898 - struct kset_uevent_ops *uevent_ops;
62899 + const struct kset_uevent_ops *uevent_ops;
62900 u64 seq;
62901 int i = 0;
62902 int retval = 0;
62903 diff -urNp linux-2.6.32.42/lib/kref.c linux-2.6.32.42/lib/kref.c
62904 --- linux-2.6.32.42/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
62905 +++ linux-2.6.32.42/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
62906 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
62907 */
62908 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
62909 {
62910 - WARN_ON(release == NULL);
62911 + BUG_ON(release == NULL);
62912 WARN_ON(release == (void (*)(struct kref *))kfree);
62913
62914 if (atomic_dec_and_test(&kref->refcount)) {
62915 diff -urNp linux-2.6.32.42/lib/parser.c linux-2.6.32.42/lib/parser.c
62916 --- linux-2.6.32.42/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
62917 +++ linux-2.6.32.42/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
62918 @@ -126,7 +126,7 @@ static int match_number(substring_t *s,
62919 char *buf;
62920 int ret;
62921
62922 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
62923 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
62924 if (!buf)
62925 return -ENOMEM;
62926 memcpy(buf, s->from, s->to - s->from);
62927 diff -urNp linux-2.6.32.42/lib/radix-tree.c linux-2.6.32.42/lib/radix-tree.c
62928 --- linux-2.6.32.42/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
62929 +++ linux-2.6.32.42/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
62930 @@ -81,7 +81,7 @@ struct radix_tree_preload {
62931 int nr;
62932 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
62933 };
62934 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
62935 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
62936
62937 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
62938 {
62939 diff -urNp linux-2.6.32.42/lib/random32.c linux-2.6.32.42/lib/random32.c
62940 --- linux-2.6.32.42/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
62941 +++ linux-2.6.32.42/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
62942 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
62943 */
62944 static inline u32 __seed(u32 x, u32 m)
62945 {
62946 - return (x < m) ? x + m : x;
62947 + return (x <= m) ? x + m + 1 : x;
62948 }
62949
62950 /**
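
The lib/random32.c change tightens the seed clamping helper used when seeding the Tausworthe generator: each state word must stay strictly above a small per-word minimum, and the original expression could return exactly m when x equals 0 or m, which is one too small. The patched form always returns a value strictly greater than m. A standalone comparison (names are illustrative, not from the patch):

	static inline u32 seed_old(u32 x, u32 m)
	{
		return (x < m) ? x + m : x;		/* x == 0 yields exactly m: too small */
	}

	static inline u32 seed_new(u32 x, u32 m)
	{
		return (x <= m) ? x + m + 1 : x;	/* always strictly greater than m */
	}
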
62951 diff -urNp linux-2.6.32.42/lib/vsprintf.c linux-2.6.32.42/lib/vsprintf.c
62952 --- linux-2.6.32.42/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
62953 +++ linux-2.6.32.42/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
62954 @@ -16,6 +16,9 @@
62955 * - scnprintf and vscnprintf
62956 */
62957
62958 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62959 +#define __INCLUDED_BY_HIDESYM 1
62960 +#endif
62961 #include <stdarg.h>
62962 #include <linux/module.h>
62963 #include <linux/types.h>
62964 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end
62965 return buf;
62966 }
62967
62968 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
62969 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
62970 {
62971 int len, i;
62972
62973 if ((unsigned long)s < PAGE_SIZE)
62974 - s = "<NULL>";
62975 + s = "(null)";
62976
62977 len = strnlen(s, spec.precision);
62978
62979 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
62980 unsigned long value = (unsigned long) ptr;
62981 #ifdef CONFIG_KALLSYMS
62982 char sym[KSYM_SYMBOL_LEN];
62983 - if (ext != 'f' && ext != 's')
62984 + if (ext != 'f' && ext != 's' && ext != 'a')
62985 sprint_symbol(sym, value);
62986 else
62987 kallsyms_lookup(value, NULL, NULL, NULL, sym);
62988 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
62989 * - 'f' For simple symbolic function names without offset
62990 * - 'S' For symbolic direct pointers with offset
62991 * - 's' For symbolic direct pointers without offset
62992 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
62993 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
62994 * - 'R' For a struct resource pointer, it prints the range of
62995 * addresses (not the name nor the flags)
62996 * - 'M' For a 6-byte MAC address, it prints the address in the
62997 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
62998 struct printf_spec spec)
62999 {
63000 if (!ptr)
63001 - return string(buf, end, "(null)", spec);
63002 + return string(buf, end, "(nil)", spec);
63003
63004 switch (*fmt) {
63005 case 'F':
63006 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
63007 case 's':
63008 /* Fallthrough */
63009 case 'S':
63010 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63011 + break;
63012 +#else
63013 + return symbol_string(buf, end, ptr, spec, *fmt);
63014 +#endif
63015 + case 'a':
63016 + /* Fallthrough */
63017 + case 'A':
63018 return symbol_string(buf, end, ptr, spec, *fmt);
63019 case 'R':
63020 return resource_string(buf, end, ptr, spec);
63021 @@ -1445,7 +1458,7 @@ do { \
63022 size_t len;
63023 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
63024 || (unsigned long)save_str < PAGE_SIZE)
63025 - save_str = "<NULL>";
63026 + save_str = "(null)";
63027 len = strlen(save_str);
63028 if (str + len + 1 < end)
63029 memcpy(str, save_str, len + 1);
63030 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
63031 typeof(type) value; \
63032 if (sizeof(type) == 8) { \
63033 args = PTR_ALIGN(args, sizeof(u32)); \
63034 - *(u32 *)&value = *(u32 *)args; \
63035 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
63036 + *(u32 *)&value = *(const u32 *)args; \
63037 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
63038 } else { \
63039 args = PTR_ALIGN(args, sizeof(type)); \
63040 - value = *(typeof(type) *)args; \
63041 + value = *(const typeof(type) *)args; \
63042 } \
63043 args += sizeof(type); \
63044 value; \
63045 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
63046 const char *str_arg = args;
63047 size_t len = strlen(str_arg);
63048 args += len + 1;
63049 - str = string(str, end, (char *)str_arg, spec);
63050 + str = string(str, end, str_arg, spec);
63051 break;
63052 }
63053
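
The lib/vsprintf.c hunks add the %pa/%pA pointer extensions for GRKERNSEC_HIDESYM: when symbol hiding is enabled, the stock %ps/%pS specifiers no longer perform the kallsyms lookup, while %pa/%pA still do and are reserved for call sites judged safe to expose (the mm/kmemleak.c hunk further down switches its backtrace printing from %pS to %pA for exactly this reason). A hedged usage sketch:

	static void report_handler(void *fn)
	{
		pr_info("handler: %pS\n", fn);	/* with HIDESYM: symbol lookup skipped */
		pr_info("handler: %pA\n", fn);	/* still resolved, with offset */
	}
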
63054 diff -urNp linux-2.6.32.42/localversion-grsec linux-2.6.32.42/localversion-grsec
63055 --- linux-2.6.32.42/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
63056 +++ linux-2.6.32.42/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
63057 @@ -0,0 +1 @@
63058 +-grsec
63059 diff -urNp linux-2.6.32.42/Makefile linux-2.6.32.42/Makefile
63060 --- linux-2.6.32.42/Makefile 2011-06-25 12:55:34.000000000 -0400
63061 +++ linux-2.6.32.42/Makefile 2011-06-25 12:56:37.000000000 -0400
63062 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
63063
63064 HOSTCC = gcc
63065 HOSTCXX = g++
63066 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
63067 -HOSTCXXFLAGS = -O2
63068 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
63069 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
63070 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
63071
63072 # Decide whether to build built-in, modular, or both.
63073 # Normally, just do built-in.
63074 @@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
63075 KBUILD_CPPFLAGS := -D__KERNEL__
63076
63077 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
63078 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
63079 -fno-strict-aliasing -fno-common \
63080 -Werror-implicit-function-declaration \
63081 -Wno-format-security \
63082 -fno-delete-null-pointer-checks
63083 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
63084 KBUILD_AFLAGS := -D__ASSEMBLY__
63085
63086 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
63087 @@ -403,7 +406,7 @@ endif
63088 # of make so .config is not included in this case either (for *config).
63089
63090 no-dot-config-targets := clean mrproper distclean \
63091 - cscope TAGS tags help %docs check% \
63092 + cscope gtags TAGS tags help %docs check% \
63093 include/linux/version.h headers_% \
63094 kernelrelease kernelversion
63095
63096 @@ -644,7 +647,7 @@ export mod_strip_cmd
63097
63098
63099 ifeq ($(KBUILD_EXTMOD),)
63100 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
63101 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
63102
63103 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
63104 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
63105 @@ -949,7 +952,19 @@ include/config/kernel.release: include/c
63106 # version.h and scripts_basic is processed / created.
63107
63108 # Listed in dependency order
63109 -PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
63110 +PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3 pax-plugin
63111 +
63112 +ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
63113 +KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0405, -fplugin=$(objtree)/tools/gcc/pax_plugin.so -fplugin-arg-pax_plugin-track-lowest-sp=100)
63114 +endif
63115 +pax-plugin:
63116 +ifneq (,$(findstring pax_plugin, $(KBUILD_CFLAGS)))
63117 + $(Q)$(MAKE) $(build)=tools/gcc
63118 +else
63119 +ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
63120 + $(Q)echo "warning, your gcc does not support plugins, PAX_MEMORY_STACKLEAK will be less secure"
63121 +endif
63122 +endif
63123
63124 # prepare3 is used to check if we are building in a separate output directory,
63125 # and if so do:
63126 @@ -970,7 +985,7 @@ ifneq ($(KBUILD_SRC),)
63127 endif
63128
63129 # prepare2 creates a makefile if using a separate output directory
63130 -prepare2: prepare3 outputmakefile
63131 +prepare2: prepare3 outputmakefile pax-plugin
63132
63133 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
63134 include/asm include/config/auto.conf
63135 @@ -1198,7 +1213,7 @@ MRPROPER_FILES += .config .config.old in
63136 include/linux/autoconf.h include/linux/version.h \
63137 include/linux/utsrelease.h \
63138 include/linux/bounds.h include/asm*/asm-offsets.h \
63139 - Module.symvers Module.markers tags TAGS cscope*
63140 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
63141
63142 # clean - Delete most, but leave enough to build external modules
63143 #
63144 @@ -1289,6 +1304,7 @@ help:
63145 @echo ' modules_prepare - Set up for building external modules'
63146 @echo ' tags/TAGS - Generate tags file for editors'
63147 @echo ' cscope - Generate cscope index'
63148 + @echo ' gtags - Generate GNU GLOBAL index'
63149 @echo ' kernelrelease - Output the release version string'
63150 @echo ' kernelversion - Output the version stored in Makefile'
63151 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
63152 @@ -1445,7 +1461,7 @@ endif # KBUILD_EXTMOD
63153 quiet_cmd_tags = GEN $@
63154 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
63155
63156 -tags TAGS cscope: FORCE
63157 +tags TAGS cscope gtags: FORCE
63158 $(call cmd,tags)
63159
63160 # Scripts to check various things for consistency
63161 diff -urNp linux-2.6.32.42/mm/backing-dev.c linux-2.6.32.42/mm/backing-dev.c
63162 --- linux-2.6.32.42/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
63163 +++ linux-2.6.32.42/mm/backing-dev.c 2011-05-04 17:56:28.000000000 -0400
63164 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
63165 * Add the default flusher task that gets created for any bdi
63166 * that has dirty data pending writeout
63167 */
63168 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63169 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63170 {
63171 if (!bdi_cap_writeback_dirty(bdi))
63172 return;
63173 diff -urNp linux-2.6.32.42/mm/filemap.c linux-2.6.32.42/mm/filemap.c
63174 --- linux-2.6.32.42/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
63175 +++ linux-2.6.32.42/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
63176 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
63177 struct address_space *mapping = file->f_mapping;
63178
63179 if (!mapping->a_ops->readpage)
63180 - return -ENOEXEC;
63181 + return -ENODEV;
63182 file_accessed(file);
63183 vma->vm_ops = &generic_file_vm_ops;
63184 vma->vm_flags |= VM_CAN_NONLINEAR;
63185 @@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
63186 *pos = i_size_read(inode);
63187
63188 if (limit != RLIM_INFINITY) {
63189 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
63190 if (*pos >= limit) {
63191 send_sig(SIGXFSZ, current, 0);
63192 return -EFBIG;
63193 diff -urNp linux-2.6.32.42/mm/fremap.c linux-2.6.32.42/mm/fremap.c
63194 --- linux-2.6.32.42/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
63195 +++ linux-2.6.32.42/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
63196 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63197 retry:
63198 vma = find_vma(mm, start);
63199
63200 +#ifdef CONFIG_PAX_SEGMEXEC
63201 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
63202 + goto out;
63203 +#endif
63204 +
63205 /*
63206 * Make sure the vma is shared, that it supports prefaulting,
63207 * and that the remapped range is valid and fully within
63208 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63209 /*
63210 * drop PG_Mlocked flag for over-mapped range
63211 */
63212 - unsigned int saved_flags = vma->vm_flags;
63213 + unsigned long saved_flags = vma->vm_flags;
63214 munlock_vma_pages_range(vma, start, start + size);
63215 vma->vm_flags = saved_flags;
63216 }
63217 diff -urNp linux-2.6.32.42/mm/highmem.c linux-2.6.32.42/mm/highmem.c
63218 --- linux-2.6.32.42/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
63219 +++ linux-2.6.32.42/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
63220 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
63221 * So no dangers, even with speculative execution.
63222 */
63223 page = pte_page(pkmap_page_table[i]);
63224 + pax_open_kernel();
63225 pte_clear(&init_mm, (unsigned long)page_address(page),
63226 &pkmap_page_table[i]);
63227 -
63228 + pax_close_kernel();
63229 set_page_address(page, NULL);
63230 need_flush = 1;
63231 }
63232 @@ -177,9 +178,11 @@ start:
63233 }
63234 }
63235 vaddr = PKMAP_ADDR(last_pkmap_nr);
63236 +
63237 + pax_open_kernel();
63238 set_pte_at(&init_mm, vaddr,
63239 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
63240 -
63241 + pax_close_kernel();
63242 pkmap_count[last_pkmap_nr] = 1;
63243 set_page_address(page, (void *)vaddr);
63244
63245 diff -urNp linux-2.6.32.42/mm/hugetlb.c linux-2.6.32.42/mm/hugetlb.c
63246 --- linux-2.6.32.42/mm/hugetlb.c 2011-06-25 12:55:35.000000000 -0400
63247 +++ linux-2.6.32.42/mm/hugetlb.c 2011-06-25 12:56:37.000000000 -0400
63248 @@ -1925,6 +1925,26 @@ static int unmap_ref_private(struct mm_s
63249 return 1;
63250 }
63251
63252 +#ifdef CONFIG_PAX_SEGMEXEC
63253 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
63254 +{
63255 + struct mm_struct *mm = vma->vm_mm;
63256 + struct vm_area_struct *vma_m;
63257 + unsigned long address_m;
63258 + pte_t *ptep_m;
63259 +
63260 + vma_m = pax_find_mirror_vma(vma);
63261 + if (!vma_m)
63262 + return;
63263 +
63264 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63265 + address_m = address + SEGMEXEC_TASK_SIZE;
63266 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
63267 + get_page(page_m);
63268 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
63269 +}
63270 +#endif
63271 +
63272 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
63273 unsigned long address, pte_t *ptep, pte_t pte,
63274 struct page *pagecache_page)
63275 @@ -1996,6 +2016,11 @@ retry_avoidcopy:
63276 huge_ptep_clear_flush(vma, address, ptep);
63277 set_huge_pte_at(mm, address, ptep,
63278 make_huge_pte(vma, new_page, 1));
63279 +
63280 +#ifdef CONFIG_PAX_SEGMEXEC
63281 + pax_mirror_huge_pte(vma, address, new_page);
63282 +#endif
63283 +
63284 /* Make the old page be freed below */
63285 new_page = old_page;
63286 }
63287 @@ -2127,6 +2152,10 @@ retry:
63288 && (vma->vm_flags & VM_SHARED)));
63289 set_huge_pte_at(mm, address, ptep, new_pte);
63290
63291 +#ifdef CONFIG_PAX_SEGMEXEC
63292 + pax_mirror_huge_pte(vma, address, page);
63293 +#endif
63294 +
63295 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
63296 /* Optimization, do the COW without a second fault */
63297 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
63298 @@ -2155,6 +2184,28 @@ int hugetlb_fault(struct mm_struct *mm,
63299 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
63300 struct hstate *h = hstate_vma(vma);
63301
63302 +#ifdef CONFIG_PAX_SEGMEXEC
63303 + struct vm_area_struct *vma_m;
63304 +
63305 + vma_m = pax_find_mirror_vma(vma);
63306 + if (vma_m) {
63307 + unsigned long address_m;
63308 +
63309 + if (vma->vm_start > vma_m->vm_start) {
63310 + address_m = address;
63311 + address -= SEGMEXEC_TASK_SIZE;
63312 + vma = vma_m;
63313 + h = hstate_vma(vma);
63314 + } else
63315 + address_m = address + SEGMEXEC_TASK_SIZE;
63316 +
63317 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
63318 + return VM_FAULT_OOM;
63319 + address_m &= HPAGE_MASK;
63320 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
63321 + }
63322 +#endif
63323 +
63324 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
63325 if (!ptep)
63326 return VM_FAULT_OOM;
63327 diff -urNp linux-2.6.32.42/mm/Kconfig linux-2.6.32.42/mm/Kconfig
63328 --- linux-2.6.32.42/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
63329 +++ linux-2.6.32.42/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
63330 @@ -228,7 +228,7 @@ config KSM
63331 config DEFAULT_MMAP_MIN_ADDR
63332 int "Low address space to protect from user allocation"
63333 depends on MMU
63334 - default 4096
63335 + default 65536
63336 help
63337 This is the portion of low virtual memory which should be protected
63338 from userspace allocation. Keeping a user from writing to low pages
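
The mm/Kconfig hunk raises the default for vm.mmap_min_addr from 4 KiB to 64 KiB, widening the low address range that unprivileged processes cannot map and thereby blunting kernel NULL-pointer-dereference exploits. A userspace sketch of the effect (illustrative only; the exact errno and capability check depend on the security module in use):

	#include <sys/mman.h>
	#include <stdio.h>

	int main(void)
	{
		/* an unprivileged MAP_FIXED request below vm.mmap_min_addr is refused */
		void *p = mmap((void *)0, 4096, PROT_READ | PROT_WRITE,
			       MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		if (p == MAP_FAILED)
			perror("mmap at address 0");
		else
			munmap(p, 4096);
		return 0;
	}
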
63339 diff -urNp linux-2.6.32.42/mm/kmemleak.c linux-2.6.32.42/mm/kmemleak.c
63340 --- linux-2.6.32.42/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
63341 +++ linux-2.6.32.42/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
63342 @@ -358,7 +358,7 @@ static void print_unreferenced(struct se
63343
63344 for (i = 0; i < object->trace_len; i++) {
63345 void *ptr = (void *)object->trace[i];
63346 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
63347 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
63348 }
63349 }
63350
63351 diff -urNp linux-2.6.32.42/mm/ksm.c linux-2.6.32.42/mm/ksm.c
63352 --- linux-2.6.32.42/mm/ksm.c 2011-03-27 14:31:47.000000000 -0400
63353 +++ linux-2.6.32.42/mm/ksm.c 2011-06-20 19:38:36.000000000 -0400
63354 @@ -1215,6 +1215,12 @@ static struct rmap_item *scan_get_next_r
63355 slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
63356 ksm_scan.mm_slot = slot;
63357 spin_unlock(&ksm_mmlist_lock);
63358 + /*
63359 + * Although we tested list_empty() above, a racing __ksm_exit
63360 + * of the last mm on the list may have removed it since then.
63361 + */
63362 + if (slot == &ksm_mm_head)
63363 + return NULL;
63364 next_mm:
63365 ksm_scan.address = 0;
63366 ksm_scan.rmap_item = list_entry(&slot->rmap_list,
63367 diff -urNp linux-2.6.32.42/mm/maccess.c linux-2.6.32.42/mm/maccess.c
63368 --- linux-2.6.32.42/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
63369 +++ linux-2.6.32.42/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
63370 @@ -14,7 +14,7 @@
63371 * Safely read from address @src to the buffer at @dst. If a kernel fault
63372 * happens, handle that and return -EFAULT.
63373 */
63374 -long probe_kernel_read(void *dst, void *src, size_t size)
63375 +long probe_kernel_read(void *dst, const void *src, size_t size)
63376 {
63377 long ret;
63378 mm_segment_t old_fs = get_fs();
63379 @@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
63380 * Safely write to address @dst from the buffer at @src. If a kernel fault
63381 * happens, handle that and return -EFAULT.
63382 */
63383 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
63384 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
63385 {
63386 long ret;
63387 mm_segment_t old_fs = get_fs();
63388 diff -urNp linux-2.6.32.42/mm/madvise.c linux-2.6.32.42/mm/madvise.c
63389 --- linux-2.6.32.42/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
63390 +++ linux-2.6.32.42/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
63391 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
63392 pgoff_t pgoff;
63393 unsigned long new_flags = vma->vm_flags;
63394
63395 +#ifdef CONFIG_PAX_SEGMEXEC
63396 + struct vm_area_struct *vma_m;
63397 +#endif
63398 +
63399 switch (behavior) {
63400 case MADV_NORMAL:
63401 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
63402 @@ -103,6 +107,13 @@ success:
63403 /*
63404 * vm_flags is protected by the mmap_sem held in write mode.
63405 */
63406 +
63407 +#ifdef CONFIG_PAX_SEGMEXEC
63408 + vma_m = pax_find_mirror_vma(vma);
63409 + if (vma_m)
63410 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
63411 +#endif
63412 +
63413 vma->vm_flags = new_flags;
63414
63415 out:
63416 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
63417 struct vm_area_struct ** prev,
63418 unsigned long start, unsigned long end)
63419 {
63420 +
63421 +#ifdef CONFIG_PAX_SEGMEXEC
63422 + struct vm_area_struct *vma_m;
63423 +#endif
63424 +
63425 *prev = vma;
63426 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
63427 return -EINVAL;
63428 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
63429 zap_page_range(vma, start, end - start, &details);
63430 } else
63431 zap_page_range(vma, start, end - start, NULL);
63432 +
63433 +#ifdef CONFIG_PAX_SEGMEXEC
63434 + vma_m = pax_find_mirror_vma(vma);
63435 + if (vma_m) {
63436 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
63437 + struct zap_details details = {
63438 + .nonlinear_vma = vma_m,
63439 + .last_index = ULONG_MAX,
63440 + };
63441 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
63442 + } else
63443 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
63444 + }
63445 +#endif
63446 +
63447 return 0;
63448 }
63449
63450 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
63451 if (end < start)
63452 goto out;
63453
63454 +#ifdef CONFIG_PAX_SEGMEXEC
63455 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63456 + if (end > SEGMEXEC_TASK_SIZE)
63457 + goto out;
63458 + } else
63459 +#endif
63460 +
63461 + if (end > TASK_SIZE)
63462 + goto out;
63463 +
63464 error = 0;
63465 if (end == start)
63466 goto out;
63467 diff -urNp linux-2.6.32.42/mm/memory.c linux-2.6.32.42/mm/memory.c
63468 --- linux-2.6.32.42/mm/memory.c 2011-03-27 14:31:47.000000000 -0400
63469 +++ linux-2.6.32.42/mm/memory.c 2011-04-17 15:56:46.000000000 -0400
63470 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
63471 return;
63472
63473 pmd = pmd_offset(pud, start);
63474 +
63475 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
63476 pud_clear(pud);
63477 pmd_free_tlb(tlb, pmd, start);
63478 +#endif
63479 +
63480 }
63481
63482 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
63483 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct
63484 if (end - 1 > ceiling - 1)
63485 return;
63486
63487 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
63488 pud = pud_offset(pgd, start);
63489 pgd_clear(pgd);
63490 pud_free_tlb(tlb, pud, start);
63491 +#endif
63492 +
63493 }
63494
63495 /*
63496 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
63497 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
63498 i = 0;
63499
63500 - do {
63501 + while (nr_pages) {
63502 struct vm_area_struct *vma;
63503
63504 - vma = find_extend_vma(mm, start);
63505 + vma = find_vma(mm, start);
63506 if (!vma && in_gate_area(tsk, start)) {
63507 unsigned long pg = start & PAGE_MASK;
63508 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
63509 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
63510 continue;
63511 }
63512
63513 - if (!vma ||
63514 + if (!vma || start < vma->vm_start ||
63515 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
63516 !(vm_flags & vma->vm_flags))
63517 return i ? : -EFAULT;
63518 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
63519 start += PAGE_SIZE;
63520 nr_pages--;
63521 } while (nr_pages && start < vma->vm_end);
63522 - } while (nr_pages);
63523 + }
63524 return i;
63525 }
63526
63527 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
63528 page_add_file_rmap(page);
63529 set_pte_at(mm, addr, pte, mk_pte(page, prot));
63530
63531 +#ifdef CONFIG_PAX_SEGMEXEC
63532 + pax_mirror_file_pte(vma, addr, page, ptl);
63533 +#endif
63534 +
63535 retval = 0;
63536 pte_unmap_unlock(pte, ptl);
63537 return retval;
63538 @@ -1560,10 +1571,22 @@ out:
63539 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
63540 struct page *page)
63541 {
63542 +
63543 +#ifdef CONFIG_PAX_SEGMEXEC
63544 + struct vm_area_struct *vma_m;
63545 +#endif
63546 +
63547 if (addr < vma->vm_start || addr >= vma->vm_end)
63548 return -EFAULT;
63549 if (!page_count(page))
63550 return -EINVAL;
63551 +
63552 +#ifdef CONFIG_PAX_SEGMEXEC
63553 + vma_m = pax_find_mirror_vma(vma);
63554 + if (vma_m)
63555 + vma_m->vm_flags |= VM_INSERTPAGE;
63556 +#endif
63557 +
63558 vma->vm_flags |= VM_INSERTPAGE;
63559 return insert_page(vma, addr, page, vma->vm_page_prot);
63560 }
63561 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
63562 unsigned long pfn)
63563 {
63564 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
63565 + BUG_ON(vma->vm_mirror);
63566
63567 if (addr < vma->vm_start || addr >= vma->vm_end)
63568 return -EFAULT;
63569 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
63570 copy_user_highpage(dst, src, va, vma);
63571 }
63572
63573 +#ifdef CONFIG_PAX_SEGMEXEC
63574 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
63575 +{
63576 + struct mm_struct *mm = vma->vm_mm;
63577 + spinlock_t *ptl;
63578 + pte_t *pte, entry;
63579 +
63580 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
63581 + entry = *pte;
63582 + if (!pte_present(entry)) {
63583 + if (!pte_none(entry)) {
63584 + BUG_ON(pte_file(entry));
63585 + free_swap_and_cache(pte_to_swp_entry(entry));
63586 + pte_clear_not_present_full(mm, address, pte, 0);
63587 + }
63588 + } else {
63589 + struct page *page;
63590 +
63591 + flush_cache_page(vma, address, pte_pfn(entry));
63592 + entry = ptep_clear_flush(vma, address, pte);
63593 + BUG_ON(pte_dirty(entry));
63594 + page = vm_normal_page(vma, address, entry);
63595 + if (page) {
63596 + update_hiwater_rss(mm);
63597 + if (PageAnon(page))
63598 + dec_mm_counter(mm, anon_rss);
63599 + else
63600 + dec_mm_counter(mm, file_rss);
63601 + page_remove_rmap(page);
63602 + page_cache_release(page);
63603 + }
63604 + }
63605 + pte_unmap_unlock(pte, ptl);
63606 +}
63607 +
63608 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
63609 + *
63610 + * the ptl of the lower mapped page is held on entry and is not released on exit
63611 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
63612 + */
63613 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63614 +{
63615 + struct mm_struct *mm = vma->vm_mm;
63616 + unsigned long address_m;
63617 + spinlock_t *ptl_m;
63618 + struct vm_area_struct *vma_m;
63619 + pmd_t *pmd_m;
63620 + pte_t *pte_m, entry_m;
63621 +
63622 + BUG_ON(!page_m || !PageAnon(page_m));
63623 +
63624 + vma_m = pax_find_mirror_vma(vma);
63625 + if (!vma_m)
63626 + return;
63627 +
63628 + BUG_ON(!PageLocked(page_m));
63629 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63630 + address_m = address + SEGMEXEC_TASK_SIZE;
63631 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63632 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63633 + ptl_m = pte_lockptr(mm, pmd_m);
63634 + if (ptl != ptl_m) {
63635 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63636 + if (!pte_none(*pte_m))
63637 + goto out;
63638 + }
63639 +
63640 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63641 + page_cache_get(page_m);
63642 + page_add_anon_rmap(page_m, vma_m, address_m);
63643 + inc_mm_counter(mm, anon_rss);
63644 + set_pte_at(mm, address_m, pte_m, entry_m);
63645 + update_mmu_cache(vma_m, address_m, entry_m);
63646 +out:
63647 + if (ptl != ptl_m)
63648 + spin_unlock(ptl_m);
63649 + pte_unmap_nested(pte_m);
63650 + unlock_page(page_m);
63651 +}
63652 +
63653 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63654 +{
63655 + struct mm_struct *mm = vma->vm_mm;
63656 + unsigned long address_m;
63657 + spinlock_t *ptl_m;
63658 + struct vm_area_struct *vma_m;
63659 + pmd_t *pmd_m;
63660 + pte_t *pte_m, entry_m;
63661 +
63662 + BUG_ON(!page_m || PageAnon(page_m));
63663 +
63664 + vma_m = pax_find_mirror_vma(vma);
63665 + if (!vma_m)
63666 + return;
63667 +
63668 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63669 + address_m = address + SEGMEXEC_TASK_SIZE;
63670 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63671 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63672 + ptl_m = pte_lockptr(mm, pmd_m);
63673 + if (ptl != ptl_m) {
63674 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63675 + if (!pte_none(*pte_m))
63676 + goto out;
63677 + }
63678 +
63679 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63680 + page_cache_get(page_m);
63681 + page_add_file_rmap(page_m);
63682 + inc_mm_counter(mm, file_rss);
63683 + set_pte_at(mm, address_m, pte_m, entry_m);
63684 + update_mmu_cache(vma_m, address_m, entry_m);
63685 +out:
63686 + if (ptl != ptl_m)
63687 + spin_unlock(ptl_m);
63688 + pte_unmap_nested(pte_m);
63689 +}
63690 +
63691 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
63692 +{
63693 + struct mm_struct *mm = vma->vm_mm;
63694 + unsigned long address_m;
63695 + spinlock_t *ptl_m;
63696 + struct vm_area_struct *vma_m;
63697 + pmd_t *pmd_m;
63698 + pte_t *pte_m, entry_m;
63699 +
63700 + vma_m = pax_find_mirror_vma(vma);
63701 + if (!vma_m)
63702 + return;
63703 +
63704 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63705 + address_m = address + SEGMEXEC_TASK_SIZE;
63706 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63707 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63708 + ptl_m = pte_lockptr(mm, pmd_m);
63709 + if (ptl != ptl_m) {
63710 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63711 + if (!pte_none(*pte_m))
63712 + goto out;
63713 + }
63714 +
63715 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
63716 + set_pte_at(mm, address_m, pte_m, entry_m);
63717 +out:
63718 + if (ptl != ptl_m)
63719 + spin_unlock(ptl_m);
63720 + pte_unmap_nested(pte_m);
63721 +}
63722 +
63723 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
63724 +{
63725 + struct page *page_m;
63726 + pte_t entry;
63727 +
63728 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
63729 + goto out;
63730 +
63731 + entry = *pte;
63732 + page_m = vm_normal_page(vma, address, entry);
63733 + if (!page_m)
63734 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
63735 + else if (PageAnon(page_m)) {
63736 + if (pax_find_mirror_vma(vma)) {
63737 + pte_unmap_unlock(pte, ptl);
63738 + lock_page(page_m);
63739 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
63740 + if (pte_same(entry, *pte))
63741 + pax_mirror_anon_pte(vma, address, page_m, ptl);
63742 + else
63743 + unlock_page(page_m);
63744 + }
63745 + } else
63746 + pax_mirror_file_pte(vma, address, page_m, ptl);
63747 +
63748 +out:
63749 + pte_unmap_unlock(pte, ptl);
63750 +}
63751 +#endif
63752 +
63753 /*
63754 * This routine handles present pages, when users try to write
63755 * to a shared page. It is done by copying the page to a new address
63756 @@ -2156,6 +2360,12 @@ gotten:
63757 */
63758 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63759 if (likely(pte_same(*page_table, orig_pte))) {
63760 +
63761 +#ifdef CONFIG_PAX_SEGMEXEC
63762 + if (pax_find_mirror_vma(vma))
63763 + BUG_ON(!trylock_page(new_page));
63764 +#endif
63765 +
63766 if (old_page) {
63767 if (!PageAnon(old_page)) {
63768 dec_mm_counter(mm, file_rss);
63769 @@ -2207,6 +2417,10 @@ gotten:
63770 page_remove_rmap(old_page);
63771 }
63772
63773 +#ifdef CONFIG_PAX_SEGMEXEC
63774 + pax_mirror_anon_pte(vma, address, new_page, ptl);
63775 +#endif
63776 +
63777 /* Free the old page.. */
63778 new_page = old_page;
63779 ret |= VM_FAULT_WRITE;
63780 @@ -2604,6 +2818,11 @@ static int do_swap_page(struct mm_struct
63781 swap_free(entry);
63782 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
63783 try_to_free_swap(page);
63784 +
63785 +#ifdef CONFIG_PAX_SEGMEXEC
63786 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
63787 +#endif
63788 +
63789 unlock_page(page);
63790
63791 if (flags & FAULT_FLAG_WRITE) {
63792 @@ -2615,6 +2834,11 @@ static int do_swap_page(struct mm_struct
63793
63794 /* No need to invalidate - it was non-present before */
63795 update_mmu_cache(vma, address, pte);
63796 +
63797 +#ifdef CONFIG_PAX_SEGMEXEC
63798 + pax_mirror_anon_pte(vma, address, page, ptl);
63799 +#endif
63800 +
63801 unlock:
63802 pte_unmap_unlock(page_table, ptl);
63803 out:
63804 @@ -2630,40 +2854,6 @@ out_release:
63805 }
63806
63807 /*
63808 - * This is like a special single-page "expand_{down|up}wards()",
63809 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
63810 - * doesn't hit another vma.
63811 - */
63812 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
63813 -{
63814 - address &= PAGE_MASK;
63815 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
63816 - struct vm_area_struct *prev = vma->vm_prev;
63817 -
63818 - /*
63819 - * Is there a mapping abutting this one below?
63820 - *
63821 - * That's only ok if it's the same stack mapping
63822 - * that has gotten split..
63823 - */
63824 - if (prev && prev->vm_end == address)
63825 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
63826 -
63827 - expand_stack(vma, address - PAGE_SIZE);
63828 - }
63829 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
63830 - struct vm_area_struct *next = vma->vm_next;
63831 -
63832 - /* As VM_GROWSDOWN but s/below/above/ */
63833 - if (next && next->vm_start == address + PAGE_SIZE)
63834 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
63835 -
63836 - expand_upwards(vma, address + PAGE_SIZE);
63837 - }
63838 - return 0;
63839 -}
63840 -
63841 -/*
63842 * We enter with non-exclusive mmap_sem (to exclude vma changes,
63843 * but allow concurrent faults), and pte mapped but not yet locked.
63844 * We return with mmap_sem still held, but pte unmapped and unlocked.
63845 @@ -2672,27 +2862,23 @@ static int do_anonymous_page(struct mm_s
63846 unsigned long address, pte_t *page_table, pmd_t *pmd,
63847 unsigned int flags)
63848 {
63849 - struct page *page;
63850 + struct page *page = NULL;
63851 spinlock_t *ptl;
63852 pte_t entry;
63853
63854 - pte_unmap(page_table);
63855 -
63856 - /* Check if we need to add a guard page to the stack */
63857 - if (check_stack_guard_page(vma, address) < 0)
63858 - return VM_FAULT_SIGBUS;
63859 -
63860 - /* Use the zero-page for reads */
63861 if (!(flags & FAULT_FLAG_WRITE)) {
63862 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
63863 vma->vm_page_prot));
63864 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63865 + ptl = pte_lockptr(mm, pmd);
63866 + spin_lock(ptl);
63867 if (!pte_none(*page_table))
63868 goto unlock;
63869 goto setpte;
63870 }
63871
63872 /* Allocate our own private page. */
63873 + pte_unmap(page_table);
63874 +
63875 if (unlikely(anon_vma_prepare(vma)))
63876 goto oom;
63877 page = alloc_zeroed_user_highpage_movable(vma, address);
63878 @@ -2711,6 +2897,11 @@ static int do_anonymous_page(struct mm_s
63879 if (!pte_none(*page_table))
63880 goto release;
63881
63882 +#ifdef CONFIG_PAX_SEGMEXEC
63883 + if (pax_find_mirror_vma(vma))
63884 + BUG_ON(!trylock_page(page));
63885 +#endif
63886 +
63887 inc_mm_counter(mm, anon_rss);
63888 page_add_new_anon_rmap(page, vma, address);
63889 setpte:
63890 @@ -2718,6 +2909,12 @@ setpte:
63891
63892 /* No need to invalidate - it was non-present before */
63893 update_mmu_cache(vma, address, entry);
63894 +
63895 +#ifdef CONFIG_PAX_SEGMEXEC
63896 + if (page)
63897 + pax_mirror_anon_pte(vma, address, page, ptl);
63898 +#endif
63899 +
63900 unlock:
63901 pte_unmap_unlock(page_table, ptl);
63902 return 0;
63903 @@ -2860,6 +3057,12 @@ static int __do_fault(struct mm_struct *
63904 */
63905 /* Only go through if we didn't race with anybody else... */
63906 if (likely(pte_same(*page_table, orig_pte))) {
63907 +
63908 +#ifdef CONFIG_PAX_SEGMEXEC
63909 + if (anon && pax_find_mirror_vma(vma))
63910 + BUG_ON(!trylock_page(page));
63911 +#endif
63912 +
63913 flush_icache_page(vma, page);
63914 entry = mk_pte(page, vma->vm_page_prot);
63915 if (flags & FAULT_FLAG_WRITE)
63916 @@ -2879,6 +3082,14 @@ static int __do_fault(struct mm_struct *
63917
63918 /* no need to invalidate: a not-present page won't be cached */
63919 update_mmu_cache(vma, address, entry);
63920 +
63921 +#ifdef CONFIG_PAX_SEGMEXEC
63922 + if (anon)
63923 + pax_mirror_anon_pte(vma, address, page, ptl);
63924 + else
63925 + pax_mirror_file_pte(vma, address, page, ptl);
63926 +#endif
63927 +
63928 } else {
63929 if (charged)
63930 mem_cgroup_uncharge_page(page);
63931 @@ -3026,6 +3237,12 @@ static inline int handle_pte_fault(struc
63932 if (flags & FAULT_FLAG_WRITE)
63933 flush_tlb_page(vma, address);
63934 }
63935 +
63936 +#ifdef CONFIG_PAX_SEGMEXEC
63937 + pax_mirror_pte(vma, address, pte, pmd, ptl);
63938 + return 0;
63939 +#endif
63940 +
63941 unlock:
63942 pte_unmap_unlock(pte, ptl);
63943 return 0;
63944 @@ -3042,6 +3259,10 @@ int handle_mm_fault(struct mm_struct *mm
63945 pmd_t *pmd;
63946 pte_t *pte;
63947
63948 +#ifdef CONFIG_PAX_SEGMEXEC
63949 + struct vm_area_struct *vma_m;
63950 +#endif
63951 +
63952 __set_current_state(TASK_RUNNING);
63953
63954 count_vm_event(PGFAULT);
63955 @@ -3049,6 +3270,34 @@ int handle_mm_fault(struct mm_struct *mm
63956 if (unlikely(is_vm_hugetlb_page(vma)))
63957 return hugetlb_fault(mm, vma, address, flags);
63958
63959 +#ifdef CONFIG_PAX_SEGMEXEC
63960 + vma_m = pax_find_mirror_vma(vma);
63961 + if (vma_m) {
63962 + unsigned long address_m;
63963 + pgd_t *pgd_m;
63964 + pud_t *pud_m;
63965 + pmd_t *pmd_m;
63966 +
63967 + if (vma->vm_start > vma_m->vm_start) {
63968 + address_m = address;
63969 + address -= SEGMEXEC_TASK_SIZE;
63970 + vma = vma_m;
63971 + } else
63972 + address_m = address + SEGMEXEC_TASK_SIZE;
63973 +
63974 + pgd_m = pgd_offset(mm, address_m);
63975 + pud_m = pud_alloc(mm, pgd_m, address_m);
63976 + if (!pud_m)
63977 + return VM_FAULT_OOM;
63978 + pmd_m = pmd_alloc(mm, pud_m, address_m);
63979 + if (!pmd_m)
63980 + return VM_FAULT_OOM;
63981 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
63982 + return VM_FAULT_OOM;
63983 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
63984 + }
63985 +#endif
63986 +
63987 pgd = pgd_offset(mm, address);
63988 pud = pud_alloc(mm, pgd, address);
63989 if (!pud)
63990 @@ -3146,7 +3395,7 @@ static int __init gate_vma_init(void)
63991 gate_vma.vm_start = FIXADDR_USER_START;
63992 gate_vma.vm_end = FIXADDR_USER_END;
63993 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
63994 - gate_vma.vm_page_prot = __P101;
63995 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
63996 /*
63997 * Make sure the vDSO gets into every core dump.
63998 * Dumping its contents makes post-mortem fully interpretable later
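
The mm/memory.c additions above implement the PaX SEGMEXEC mirroring machinery: every executable mapping in the lower half of the address space has a shadow vma at a fixed offset in the upper half, the pax_mirror_*_pte() helpers keep the mirror's PTE in sync whenever the primary PTE is populated, and handle_mm_fault() pre-allocates the mirror's page-table levels before handling the real fault. The address relationship is a fixed translation, roughly as below (the constant is an assumption for a 32-bit x86 split with a 3 GiB TASK_SIZE, not taken from this hunk):

	/* sketch only: the mirror-address arithmetic used throughout the hunks above */
	#define EXAMPLE_SEGMEXEC_TASK_SIZE	0x60000000UL	/* assumed: half of a 3 GiB TASK_SIZE */

	static unsigned long mirror_address(unsigned long address)
	{
		BUG_ON(address >= EXAMPLE_SEGMEXEC_TASK_SIZE);
		return address + EXAMPLE_SEGMEXEC_TASK_SIZE;	/* lower-half address -> upper-half mirror */
	}
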
63999 diff -urNp linux-2.6.32.42/mm/memory-failure.c linux-2.6.32.42/mm/memory-failure.c
64000 --- linux-2.6.32.42/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
64001 +++ linux-2.6.32.42/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
64002 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
64003
64004 int sysctl_memory_failure_recovery __read_mostly = 1;
64005
64006 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64007 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64008
64009 /*
64010 * Send all the processes who have the page mapped an ``action optional''
64011 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
64012 return 0;
64013 }
64014
64015 - atomic_long_add(1, &mce_bad_pages);
64016 + atomic_long_add_unchecked(1, &mce_bad_pages);
64017
64018 /*
64019 * We need/can do nothing about count=0 pages.
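
The mm/memory-failure.c hunk shows another pattern that recurs throughout the patch: counters that may legitimately grow without bound are switched to the *_unchecked atomic variants so that the reference-counter overflow detection added elsewhere in the patch (PAX_REFCOUNT) does not trigger on them. A hedged sketch of the pattern (example_stat is illustrative):

	/* statistics counter: wraparound is harmless, so overflow checking is opted out */
	static atomic_long_unchecked_t example_stat = ATOMIC_LONG_INIT(0);

	static void note_event(void)
	{
		atomic_long_add_unchecked(1, &example_stat);
	}
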
64020 diff -urNp linux-2.6.32.42/mm/mempolicy.c linux-2.6.32.42/mm/mempolicy.c
64021 --- linux-2.6.32.42/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
64022 +++ linux-2.6.32.42/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
64023 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
64024 struct vm_area_struct *next;
64025 int err;
64026
64027 +#ifdef CONFIG_PAX_SEGMEXEC
64028 + struct vm_area_struct *vma_m;
64029 +#endif
64030 +
64031 err = 0;
64032 for (; vma && vma->vm_start < end; vma = next) {
64033 next = vma->vm_next;
64034 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
64035 err = policy_vma(vma, new);
64036 if (err)
64037 break;
64038 +
64039 +#ifdef CONFIG_PAX_SEGMEXEC
64040 + vma_m = pax_find_mirror_vma(vma);
64041 + if (vma_m) {
64042 + err = policy_vma(vma_m, new);
64043 + if (err)
64044 + break;
64045 + }
64046 +#endif
64047 +
64048 }
64049 return err;
64050 }
64051 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
64052
64053 if (end < start)
64054 return -EINVAL;
64055 +
64056 +#ifdef CONFIG_PAX_SEGMEXEC
64057 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
64058 + if (end > SEGMEXEC_TASK_SIZE)
64059 + return -EINVAL;
64060 + } else
64061 +#endif
64062 +
64063 + if (end > TASK_SIZE)
64064 + return -EINVAL;
64065 +
64066 if (end == start)
64067 return 0;
64068
64069 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64070 if (!mm)
64071 return -EINVAL;
64072
64073 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64074 + if (mm != current->mm &&
64075 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64076 + err = -EPERM;
64077 + goto out;
64078 + }
64079 +#endif
64080 +
64081 /*
64082 * Check if this process has the right to modify the specified
64083 * process. The right exists if the process has administrative
64084 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64085 rcu_read_lock();
64086 tcred = __task_cred(task);
64087 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64088 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
64089 - !capable(CAP_SYS_NICE)) {
64090 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64091 rcu_read_unlock();
64092 err = -EPERM;
64093 goto out;
64094 @@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
64095
64096 if (file) {
64097 seq_printf(m, " file=");
64098 - seq_path(m, &file->f_path, "\n\t= ");
64099 + seq_path(m, &file->f_path, "\n\t\\= ");
64100 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
64101 seq_printf(m, " heap");
64102 } else if (vma->vm_start <= mm->start_stack &&
64103 diff -urNp linux-2.6.32.42/mm/migrate.c linux-2.6.32.42/mm/migrate.c
64104 --- linux-2.6.32.42/mm/migrate.c 2011-03-27 14:31:47.000000000 -0400
64105 +++ linux-2.6.32.42/mm/migrate.c 2011-05-16 21:46:57.000000000 -0400
64106 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
64107 unsigned long chunk_start;
64108 int err;
64109
64110 + pax_track_stack();
64111 +
64112 task_nodes = cpuset_mems_allowed(task);
64113
64114 err = -ENOMEM;
64115 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64116 if (!mm)
64117 return -EINVAL;
64118
64119 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64120 + if (mm != current->mm &&
64121 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64122 + err = -EPERM;
64123 + goto out;
64124 + }
64125 +#endif
64126 +
64127 /*
64128 * Check if this process has the right to modify the specified
64129 * process. The right exists if the process has administrative
64130 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64131 rcu_read_lock();
64132 tcred = __task_cred(task);
64133 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64134 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
64135 - !capable(CAP_SYS_NICE)) {
64136 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64137 rcu_read_unlock();
64138 err = -EPERM;
64139 goto out;
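
The pax_track_stack() call added to do_pages_move() ties in with the PAX_MEMORY_STACKLEAK support from the Makefile hunk earlier in this section: the feature records the lowest stack pointer reached so the used part of the kernel stack can be sanitized on return to userspace, with the gcc plugin instrumenting functions automatically where available and explicit calls like this one marking functions with large frames. A hedged sketch of the pattern (deep_stack_example is illustrative):

	/* sketch: a function with a sizeable local frame records its stack depth */
	static void deep_stack_example(void)
	{
		char scratch[512];

		pax_track_stack();		/* note the lowest stack pointer reached */
		memset(scratch, 0, sizeof(scratch));
	}
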
64140 diff -urNp linux-2.6.32.42/mm/mlock.c linux-2.6.32.42/mm/mlock.c
64141 --- linux-2.6.32.42/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
64142 +++ linux-2.6.32.42/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
64143 @@ -13,6 +13,7 @@
64144 #include <linux/pagemap.h>
64145 #include <linux/mempolicy.h>
64146 #include <linux/syscalls.h>
64147 +#include <linux/security.h>
64148 #include <linux/sched.h>
64149 #include <linux/module.h>
64150 #include <linux/rmap.h>
64151 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
64152 }
64153 }
64154
64155 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
64156 -{
64157 - return (vma->vm_flags & VM_GROWSDOWN) &&
64158 - (vma->vm_start == addr) &&
64159 - !vma_stack_continue(vma->vm_prev, addr);
64160 -}
64161 -
64162 /**
64163 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
64164 * @vma: target vma
64165 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
64166 if (vma->vm_flags & VM_WRITE)
64167 gup_flags |= FOLL_WRITE;
64168
64169 - /* We don't try to access the guard page of a stack vma */
64170 - if (stack_guard_page(vma, start)) {
64171 - addr += PAGE_SIZE;
64172 - nr_pages--;
64173 - }
64174 -
64175 while (nr_pages > 0) {
64176 int i;
64177
64178 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
64179 {
64180 unsigned long nstart, end, tmp;
64181 struct vm_area_struct * vma, * prev;
64182 - int error;
64183 + int error = -EINVAL;
64184
64185 len = PAGE_ALIGN(len);
64186 end = start + len;
64187 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
64188 return -EINVAL;
64189 if (end == start)
64190 return 0;
64191 + if (end > TASK_SIZE)
64192 + return -EINVAL;
64193 +
64194 vma = find_vma_prev(current->mm, start, &prev);
64195 if (!vma || vma->vm_start > start)
64196 return -ENOMEM;
64197 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
64198 for (nstart = start ; ; ) {
64199 unsigned int newflags;
64200
64201 +#ifdef CONFIG_PAX_SEGMEXEC
64202 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64203 + break;
64204 +#endif
64205 +
64206 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
64207
64208 newflags = vma->vm_flags | VM_LOCKED;
64209 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
64210 lock_limit >>= PAGE_SHIFT;
64211
64212 /* check against resource limits */
64213 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
64214 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
64215 error = do_mlock(start, len, 1);
64216 up_write(&current->mm->mmap_sem);
64217 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
64218 static int do_mlockall(int flags)
64219 {
64220 struct vm_area_struct * vma, * prev = NULL;
64221 - unsigned int def_flags = 0;
64222
64223 if (flags & MCL_FUTURE)
64224 - def_flags = VM_LOCKED;
64225 - current->mm->def_flags = def_flags;
64226 + current->mm->def_flags |= VM_LOCKED;
64227 + else
64228 + current->mm->def_flags &= ~VM_LOCKED;
64229 if (flags == MCL_FUTURE)
64230 goto out;
64231
64232 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
64233 - unsigned int newflags;
64234 + unsigned long newflags;
64235 +
64236 +#ifdef CONFIG_PAX_SEGMEXEC
64237 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64238 + break;
64239 +#endif
64240
64241 + BUG_ON(vma->vm_end > TASK_SIZE);
64242 newflags = vma->vm_flags | VM_LOCKED;
64243 if (!(flags & MCL_CURRENT))
64244 newflags &= ~VM_LOCKED;
64245 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
64246 lock_limit >>= PAGE_SHIFT;
64247
64248 ret = -ENOMEM;
64249 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
64250 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
64251 capable(CAP_IPC_LOCK))
64252 ret = do_mlockall(flags);
64253 diff -urNp linux-2.6.32.42/mm/mmap.c linux-2.6.32.42/mm/mmap.c
64254 --- linux-2.6.32.42/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
64255 +++ linux-2.6.32.42/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
64256 @@ -45,6 +45,16 @@
64257 #define arch_rebalance_pgtables(addr, len) (addr)
64258 #endif
64259
64260 +static inline void verify_mm_writelocked(struct mm_struct *mm)
64261 +{
64262 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
64263 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
64264 + up_read(&mm->mmap_sem);
64265 + BUG();
64266 + }
64267 +#endif
64268 +}
64269 +
64270 static void unmap_region(struct mm_struct *mm,
64271 struct vm_area_struct *vma, struct vm_area_struct *prev,
64272 unsigned long start, unsigned long end);
64273 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
64274 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
64275 *
64276 */
64277 -pgprot_t protection_map[16] = {
64278 +pgprot_t protection_map[16] __read_only = {
64279 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
64280 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
64281 };
64282
64283 pgprot_t vm_get_page_prot(unsigned long vm_flags)
64284 {
64285 - return __pgprot(pgprot_val(protection_map[vm_flags &
64286 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
64287 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
64288 pgprot_val(arch_vm_get_page_prot(vm_flags)));
64289 +
64290 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64291 + if (!nx_enabled &&
64292 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
64293 + (vm_flags & (VM_READ | VM_WRITE)))
64294 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
64295 +#endif
64296 +
64297 + return prot;
64298 }
64299 EXPORT_SYMBOL(vm_get_page_prot);
64300
64301 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
64302 int sysctl_overcommit_ratio = 50; /* default is 50% */
64303 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
64304 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
64305 struct percpu_counter vm_committed_as;
64306
64307 /*
64308 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
64309 struct vm_area_struct *next = vma->vm_next;
64310
64311 might_sleep();
64312 + BUG_ON(vma->vm_mirror);
64313 if (vma->vm_ops && vma->vm_ops->close)
64314 vma->vm_ops->close(vma);
64315 if (vma->vm_file) {
64316 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
64317 * not page aligned -Ram Gupta
64318 */
64319 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
64320 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
64321 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
64322 (mm->end_data - mm->start_data) > rlim)
64323 goto out;
64324 @@ -704,6 +726,12 @@ static int
64325 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
64326 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64327 {
64328 +
64329 +#ifdef CONFIG_PAX_SEGMEXEC
64330 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
64331 + return 0;
64332 +#endif
64333 +
64334 if (is_mergeable_vma(vma, file, vm_flags) &&
64335 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
64336 if (vma->vm_pgoff == vm_pgoff)
64337 @@ -723,6 +751,12 @@ static int
64338 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
64339 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64340 {
64341 +
64342 +#ifdef CONFIG_PAX_SEGMEXEC
64343 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
64344 + return 0;
64345 +#endif
64346 +
64347 if (is_mergeable_vma(vma, file, vm_flags) &&
64348 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
64349 pgoff_t vm_pglen;
64350 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
64351 struct vm_area_struct *vma_merge(struct mm_struct *mm,
64352 struct vm_area_struct *prev, unsigned long addr,
64353 unsigned long end, unsigned long vm_flags,
64354 - struct anon_vma *anon_vma, struct file *file,
64355 + struct anon_vma *anon_vma, struct file *file,
64356 pgoff_t pgoff, struct mempolicy *policy)
64357 {
64358 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
64359 struct vm_area_struct *area, *next;
64360
64361 +#ifdef CONFIG_PAX_SEGMEXEC
64362 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
64363 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
64364 +
64365 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
64366 +#endif
64367 +
64368 /*
64369 * We later require that vma->vm_flags == vm_flags,
64370 * so this tests vma->vm_flags & VM_SPECIAL, too.
64371 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
64372 if (next && next->vm_end == end) /* cases 6, 7, 8 */
64373 next = next->vm_next;
64374
64375 +#ifdef CONFIG_PAX_SEGMEXEC
64376 + if (prev)
64377 + prev_m = pax_find_mirror_vma(prev);
64378 + if (area)
64379 + area_m = pax_find_mirror_vma(area);
64380 + if (next)
64381 + next_m = pax_find_mirror_vma(next);
64382 +#endif
64383 +
64384 /*
64385 * Can it merge with the predecessor?
64386 */
64387 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
64388 /* cases 1, 6 */
64389 vma_adjust(prev, prev->vm_start,
64390 next->vm_end, prev->vm_pgoff, NULL);
64391 - } else /* cases 2, 5, 7 */
64392 +
64393 +#ifdef CONFIG_PAX_SEGMEXEC
64394 + if (prev_m)
64395 + vma_adjust(prev_m, prev_m->vm_start,
64396 + next_m->vm_end, prev_m->vm_pgoff, NULL);
64397 +#endif
64398 +
64399 + } else { /* cases 2, 5, 7 */
64400 vma_adjust(prev, prev->vm_start,
64401 end, prev->vm_pgoff, NULL);
64402 +
64403 +#ifdef CONFIG_PAX_SEGMEXEC
64404 + if (prev_m)
64405 + vma_adjust(prev_m, prev_m->vm_start,
64406 + end_m, prev_m->vm_pgoff, NULL);
64407 +#endif
64408 +
64409 + }
64410 return prev;
64411 }
64412
64413 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
64414 mpol_equal(policy, vma_policy(next)) &&
64415 can_vma_merge_before(next, vm_flags,
64416 anon_vma, file, pgoff+pglen)) {
64417 - if (prev && addr < prev->vm_end) /* case 4 */
64418 + if (prev && addr < prev->vm_end) { /* case 4 */
64419 vma_adjust(prev, prev->vm_start,
64420 addr, prev->vm_pgoff, NULL);
64421 - else /* cases 3, 8 */
64422 +
64423 +#ifdef CONFIG_PAX_SEGMEXEC
64424 + if (prev_m)
64425 + vma_adjust(prev_m, prev_m->vm_start,
64426 + addr_m, prev_m->vm_pgoff, NULL);
64427 +#endif
64428 +
64429 + } else { /* cases 3, 8 */
64430 vma_adjust(area, addr, next->vm_end,
64431 next->vm_pgoff - pglen, NULL);
64432 +
64433 +#ifdef CONFIG_PAX_SEGMEXEC
64434 + if (area_m)
64435 + vma_adjust(area_m, addr_m, next_m->vm_end,
64436 + next_m->vm_pgoff - pglen, NULL);
64437 +#endif
64438 +
64439 + }
64440 return area;
64441 }
64442
64443 @@ -898,14 +978,11 @@ none:
64444 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
64445 struct file *file, long pages)
64446 {
64447 - const unsigned long stack_flags
64448 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
64449 -
64450 if (file) {
64451 mm->shared_vm += pages;
64452 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
64453 mm->exec_vm += pages;
64454 - } else if (flags & stack_flags)
64455 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
64456 mm->stack_vm += pages;
64457 if (flags & (VM_RESERVED|VM_IO))
64458 mm->reserved_vm += pages;
64459 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
64460 * (the exception is when the underlying filesystem is noexec
64461 * mounted, in which case we dont add PROT_EXEC.)
64462 */
64463 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
64464 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
64465 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
64466 prot |= PROT_EXEC;
64467
64468 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
64469 /* Obtain the address to map to. we verify (or select) it and ensure
64470 * that it represents a valid section of the address space.
64471 */
64472 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
64473 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
64474 if (addr & ~PAGE_MASK)
64475 return addr;
64476
64477 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
64478 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
64479 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
64480
64481 +#ifdef CONFIG_PAX_MPROTECT
64482 + if (mm->pax_flags & MF_PAX_MPROTECT) {
64483 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
64484 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
64485 + gr_log_rwxmmap(file);
64486 +
64487 +#ifdef CONFIG_PAX_EMUPLT
64488 + vm_flags &= ~VM_EXEC;
64489 +#else
64490 + return -EPERM;
64491 +#endif
64492 +
64493 + }
64494 +
64495 + if (!(vm_flags & VM_EXEC))
64496 + vm_flags &= ~VM_MAYEXEC;
64497 +#else
64498 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
64499 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
64500 +#endif
64501 + else
64502 + vm_flags &= ~VM_MAYWRITE;
64503 + }
64504 +#endif
64505 +
64506 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64507 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
64508 + vm_flags &= ~VM_PAGEEXEC;
64509 +#endif
64510 +
64511 if (flags & MAP_LOCKED)
64512 if (!can_do_mlock())
64513 return -EPERM;
64514 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
64515 locked += mm->locked_vm;
64516 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
64517 lock_limit >>= PAGE_SHIFT;
64518 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
64519 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
64520 return -EAGAIN;
64521 }
64522 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
64523 if (error)
64524 return error;
64525
64526 + if (!gr_acl_handle_mmap(file, prot))
64527 + return -EACCES;
64528 +
64529 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
64530 }
64531 EXPORT_SYMBOL(do_mmap_pgoff);
64532 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
64533 */
64534 int vma_wants_writenotify(struct vm_area_struct *vma)
64535 {
64536 - unsigned int vm_flags = vma->vm_flags;
64537 + unsigned long vm_flags = vma->vm_flags;
64538
64539 /* If it was private or non-writable, the write bit is already clear */
64540 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
64541 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
64542 return 0;
64543
64544 /* The backer wishes to know when pages are first written to? */
64545 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
64546 unsigned long charged = 0;
64547 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
64548
64549 +#ifdef CONFIG_PAX_SEGMEXEC
64550 + struct vm_area_struct *vma_m = NULL;
64551 +#endif
64552 +
64553 + /*
64554 + * mm->mmap_sem is required to protect against another thread
64555 + * changing the mappings in case we sleep.
64556 + */
64557 + verify_mm_writelocked(mm);
64558 +
64559 /* Clear old maps */
64560 error = -ENOMEM;
64561 -munmap_back:
64562 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64563 if (vma && vma->vm_start < addr + len) {
64564 if (do_munmap(mm, addr, len))
64565 return -ENOMEM;
64566 - goto munmap_back;
64567 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64568 + BUG_ON(vma && vma->vm_start < addr + len);
64569 }
64570
64571 /* Check against address space limit. */
64572 @@ -1173,6 +1294,16 @@ munmap_back:
64573 goto unacct_error;
64574 }
64575
64576 +#ifdef CONFIG_PAX_SEGMEXEC
64577 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
64578 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
64579 + if (!vma_m) {
64580 + error = -ENOMEM;
64581 + goto free_vma;
64582 + }
64583 + }
64584 +#endif
64585 +
64586 vma->vm_mm = mm;
64587 vma->vm_start = addr;
64588 vma->vm_end = addr + len;
64589 @@ -1195,6 +1326,19 @@ munmap_back:
64590 error = file->f_op->mmap(file, vma);
64591 if (error)
64592 goto unmap_and_free_vma;
64593 +
64594 +#ifdef CONFIG_PAX_SEGMEXEC
64595 + if (vma_m && (vm_flags & VM_EXECUTABLE))
64596 + added_exe_file_vma(mm);
64597 +#endif
64598 +
64599 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64600 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
64601 + vma->vm_flags |= VM_PAGEEXEC;
64602 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
64603 + }
64604 +#endif
64605 +
64606 if (vm_flags & VM_EXECUTABLE)
64607 added_exe_file_vma(mm);
64608
64609 @@ -1218,6 +1362,11 @@ munmap_back:
64610 vma_link(mm, vma, prev, rb_link, rb_parent);
64611 file = vma->vm_file;
64612
64613 +#ifdef CONFIG_PAX_SEGMEXEC
64614 + if (vma_m)
64615 + pax_mirror_vma(vma_m, vma);
64616 +#endif
64617 +
64618 /* Once vma denies write, undo our temporary denial count */
64619 if (correct_wcount)
64620 atomic_inc(&inode->i_writecount);
64621 @@ -1226,6 +1375,7 @@ out:
64622
64623 mm->total_vm += len >> PAGE_SHIFT;
64624 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
64625 + track_exec_limit(mm, addr, addr + len, vm_flags);
64626 if (vm_flags & VM_LOCKED) {
64627 /*
64628 * makes pages present; downgrades, drops, reacquires mmap_sem
64629 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
64630 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
64631 charged = 0;
64632 free_vma:
64633 +
64634 +#ifdef CONFIG_PAX_SEGMEXEC
64635 + if (vma_m)
64636 + kmem_cache_free(vm_area_cachep, vma_m);
64637 +#endif
64638 +
64639 kmem_cache_free(vm_area_cachep, vma);
64640 unacct_error:
64641 if (charged)
64642 @@ -1255,6 +1411,44 @@ unacct_error:
64643 return error;
64644 }
64645
64646 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
64647 +{
64648 + if (!vma) {
64649 +#ifdef CONFIG_STACK_GROWSUP
64650 + if (addr > sysctl_heap_stack_gap)
64651 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
64652 + else
64653 + vma = find_vma(current->mm, 0);
64654 + if (vma && (vma->vm_flags & VM_GROWSUP))
64655 + return false;
64656 +#endif
64657 + return true;
64658 + }
64659 +
64660 + if (addr + len > vma->vm_start)
64661 + return false;
64662 +
64663 + if (vma->vm_flags & VM_GROWSDOWN)
64664 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
64665 +#ifdef CONFIG_STACK_GROWSUP
64666 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
64667 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
64668 +#endif
64669 +
64670 + return true;
64671 +}
64672 +
64673 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
64674 +{
64675 + if (vma->vm_start < len)
64676 + return -ENOMEM;
64677 + if (!(vma->vm_flags & VM_GROWSDOWN))
64678 + return vma->vm_start - len;
64679 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
64680 + return vma->vm_start - len - sysctl_heap_stack_gap;
64681 + return -ENOMEM;
64682 +}
64683 +
64684 /* Get an address range which is currently unmapped.
64685 * For shmat() with addr=0.
64686 *
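
The two helpers added above are what the get_unmapped_area paths below use instead of the plain addr + len <= vma->vm_start test: a candidate range is rejected not only if it overlaps the next VMA but also if it lands within sysctl_heap_stack_gap bytes of a stack (VM_GROWSDOWN) mapping. A stripped-down sketch of that check, with simplified types (hypothetical struct, userspace, illustrative only):

#include <stdio.h>
#include <stdbool.h>

#define VM_GROWSDOWN 0x1UL

struct vma {
	unsigned long vm_start, vm_end, vm_flags;
};

static unsigned long heap_stack_gap = 64UL << 10;	/* stands in for sysctl_heap_stack_gap */

/* next: the first mapping at or above addr (NULL if none). */
static bool gap_ok(const struct vma *next, unsigned long addr, unsigned long len)
{
	if (!next)
		return true;				/* nothing above the candidate range */
	if (addr + len > next->vm_start)
		return false;				/* plain overlap */
	if (next->vm_flags & VM_GROWSDOWN)		/* stack above: keep the gap */
		return heap_stack_gap <= next->vm_start - addr - len;
	return true;
}

int main(void)
{
	struct vma stack = { 0x70000000UL, 0x70100000UL, VM_GROWSDOWN };

	printf("%d\n", gap_ok(&stack, 0x60000000UL, 0x1000));	/* 1: far below the stack */
	printf("%d\n", gap_ok(&stack, 0x6fffe000UL, 0x1000));	/* 0: inside the 64 KiB gap */
	return 0;
}

skip_heap_stack_gap plays the matching role for the top-down allocator: instead of trying immediately below vma->vm_start, it steps down by the configured gap when the VMA above is a stack.
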
64687 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
64688 if (flags & MAP_FIXED)
64689 return addr;
64690
64691 +#ifdef CONFIG_PAX_RANDMMAP
64692 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64693 +#endif
64694 +
64695 if (addr) {
64696 addr = PAGE_ALIGN(addr);
64697 - vma = find_vma(mm, addr);
64698 - if (TASK_SIZE - len >= addr &&
64699 - (!vma || addr + len <= vma->vm_start))
64700 - return addr;
64701 + if (TASK_SIZE - len >= addr) {
64702 + vma = find_vma(mm, addr);
64703 + if (check_heap_stack_gap(vma, addr, len))
64704 + return addr;
64705 + }
64706 }
64707 if (len > mm->cached_hole_size) {
64708 - start_addr = addr = mm->free_area_cache;
64709 + start_addr = addr = mm->free_area_cache;
64710 } else {
64711 - start_addr = addr = TASK_UNMAPPED_BASE;
64712 - mm->cached_hole_size = 0;
64713 + start_addr = addr = mm->mmap_base;
64714 + mm->cached_hole_size = 0;
64715 }
64716
64717 full_search:
64718 @@ -1303,34 +1502,40 @@ full_search:
64719 * Start a new search - just in case we missed
64720 * some holes.
64721 */
64722 - if (start_addr != TASK_UNMAPPED_BASE) {
64723 - addr = TASK_UNMAPPED_BASE;
64724 - start_addr = addr;
64725 + if (start_addr != mm->mmap_base) {
64726 + start_addr = addr = mm->mmap_base;
64727 mm->cached_hole_size = 0;
64728 goto full_search;
64729 }
64730 return -ENOMEM;
64731 }
64732 - if (!vma || addr + len <= vma->vm_start) {
64733 - /*
64734 - * Remember the place where we stopped the search:
64735 - */
64736 - mm->free_area_cache = addr + len;
64737 - return addr;
64738 - }
64739 + if (check_heap_stack_gap(vma, addr, len))
64740 + break;
64741 if (addr + mm->cached_hole_size < vma->vm_start)
64742 mm->cached_hole_size = vma->vm_start - addr;
64743 addr = vma->vm_end;
64744 }
64745 +
64746 + /*
64747 + * Remember the place where we stopped the search:
64748 + */
64749 + mm->free_area_cache = addr + len;
64750 + return addr;
64751 }
64752 #endif
64753
64754 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
64755 {
64756 +
64757 +#ifdef CONFIG_PAX_SEGMEXEC
64758 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64759 + return;
64760 +#endif
64761 +
64762 /*
64763 * Is this a new hole at the lowest possible address?
64764 */
64765 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
64766 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
64767 mm->free_area_cache = addr;
64768 mm->cached_hole_size = ~0UL;
64769 }
64770 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
64771 {
64772 struct vm_area_struct *vma;
64773 struct mm_struct *mm = current->mm;
64774 - unsigned long addr = addr0;
64775 + unsigned long base = mm->mmap_base, addr = addr0;
64776
64777 /* requested length too big for entire address space */
64778 if (len > TASK_SIZE)
64779 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
64780 if (flags & MAP_FIXED)
64781 return addr;
64782
64783 +#ifdef CONFIG_PAX_RANDMMAP
64784 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64785 +#endif
64786 +
64787 /* requesting a specific address */
64788 if (addr) {
64789 addr = PAGE_ALIGN(addr);
64790 - vma = find_vma(mm, addr);
64791 - if (TASK_SIZE - len >= addr &&
64792 - (!vma || addr + len <= vma->vm_start))
64793 - return addr;
64794 + if (TASK_SIZE - len >= addr) {
64795 + vma = find_vma(mm, addr);
64796 + if (check_heap_stack_gap(vma, addr, len))
64797 + return addr;
64798 + }
64799 }
64800
64801 /* check if free_area_cache is useful for us */
64802 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
64803 /* make sure it can fit in the remaining address space */
64804 if (addr > len) {
64805 vma = find_vma(mm, addr-len);
64806 - if (!vma || addr <= vma->vm_start)
64807 + if (check_heap_stack_gap(vma, addr - len, len))
64808 /* remember the address as a hint for next time */
64809 return (mm->free_area_cache = addr-len);
64810 }
64811 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
64812 * return with success:
64813 */
64814 vma = find_vma(mm, addr);
64815 - if (!vma || addr+len <= vma->vm_start)
64816 + if (check_heap_stack_gap(vma, addr, len))
64817 /* remember the address as a hint for next time */
64818 return (mm->free_area_cache = addr);
64819
64820 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
64821 mm->cached_hole_size = vma->vm_start - addr;
64822
64823 /* try just below the current vma->vm_start */
64824 - addr = vma->vm_start-len;
64825 - } while (len < vma->vm_start);
64826 + addr = skip_heap_stack_gap(vma, len);
64827 + } while (!IS_ERR_VALUE(addr));
64828
64829 bottomup:
64830 /*
64831 @@ -1414,13 +1624,21 @@ bottomup:
64832 * can happen with large stack limits and large mmap()
64833 * allocations.
64834 */
64835 + mm->mmap_base = TASK_UNMAPPED_BASE;
64836 +
64837 +#ifdef CONFIG_PAX_RANDMMAP
64838 + if (mm->pax_flags & MF_PAX_RANDMMAP)
64839 + mm->mmap_base += mm->delta_mmap;
64840 +#endif
64841 +
64842 + mm->free_area_cache = mm->mmap_base;
64843 mm->cached_hole_size = ~0UL;
64844 - mm->free_area_cache = TASK_UNMAPPED_BASE;
64845 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
64846 /*
64847 * Restore the topdown base:
64848 */
64849 - mm->free_area_cache = mm->mmap_base;
64850 + mm->mmap_base = base;
64851 + mm->free_area_cache = base;
64852 mm->cached_hole_size = ~0UL;
64853
64854 return addr;
64855 @@ -1429,6 +1647,12 @@ bottomup:
64856
64857 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
64858 {
64859 +
64860 +#ifdef CONFIG_PAX_SEGMEXEC
64861 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64862 + return;
64863 +#endif
64864 +
64865 /*
64866 * Is this a new hole at the highest possible address?
64867 */
64868 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
64869 mm->free_area_cache = addr;
64870
64871 /* dont allow allocations above current base */
64872 - if (mm->free_area_cache > mm->mmap_base)
64873 + if (mm->free_area_cache > mm->mmap_base) {
64874 mm->free_area_cache = mm->mmap_base;
64875 + mm->cached_hole_size = ~0UL;
64876 + }
64877 }
64878
64879 unsigned long
64880 @@ -1545,6 +1771,27 @@ out:
64881 return prev ? prev->vm_next : vma;
64882 }
64883
64884 +#ifdef CONFIG_PAX_SEGMEXEC
64885 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
64886 +{
64887 + struct vm_area_struct *vma_m;
64888 +
64889 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
64890 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
64891 + BUG_ON(vma->vm_mirror);
64892 + return NULL;
64893 + }
64894 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
64895 + vma_m = vma->vm_mirror;
64896 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
64897 + BUG_ON(vma->vm_file != vma_m->vm_file);
64898 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
64899 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
64900 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
64901 + return vma_m;
64902 +}
64903 +#endif
64904 +
64905 /*
64906 * Verify that the stack growth is acceptable and
64907 * update accounting. This is shared with both the
64908 @@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
64909 return -ENOMEM;
64910
64911 /* Stack limit test */
64912 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
64913 if (size > rlim[RLIMIT_STACK].rlim_cur)
64914 return -ENOMEM;
64915
64916 @@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
64917 unsigned long limit;
64918 locked = mm->locked_vm + grow;
64919 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
64920 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
64921 if (locked > limit && !capable(CAP_IPC_LOCK))
64922 return -ENOMEM;
64923 }
64924 @@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
64925 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
64926 * vma is the last one with address > vma->vm_end. Have to extend vma.
64927 */
64928 +#ifndef CONFIG_IA64
64929 +static
64930 +#endif
64931 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
64932 {
64933 int error;
64934 + bool locknext;
64935
64936 if (!(vma->vm_flags & VM_GROWSUP))
64937 return -EFAULT;
64938
64939 + /* Also guard against wrapping around to address 0. */
64940 + if (address < PAGE_ALIGN(address+1))
64941 + address = PAGE_ALIGN(address+1);
64942 + else
64943 + return -ENOMEM;
64944 +
64945 /*
64946 * We must make sure the anon_vma is allocated
64947 * so that the anon_vma locking is not a noop.
64948 */
64949 if (unlikely(anon_vma_prepare(vma)))
64950 return -ENOMEM;
64951 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
64952 + if (locknext && anon_vma_prepare(vma->vm_next))
64953 + return -ENOMEM;
64954 anon_vma_lock(vma);
64955 + if (locknext)
64956 + anon_vma_lock(vma->vm_next);
64957
64958 /*
64959 * vma->vm_start/vm_end cannot change under us because the caller
64960 * is required to hold the mmap_sem in read mode. We need the
64961 - * anon_vma lock to serialize against concurrent expand_stacks.
64962 - * Also guard against wrapping around to address 0.
64963 + * anon_vma locks to serialize against concurrent expand_stacks
64964 + * and expand_upwards.
64965 */
64966 - if (address < PAGE_ALIGN(address+4))
64967 - address = PAGE_ALIGN(address+4);
64968 - else {
64969 - anon_vma_unlock(vma);
64970 - return -ENOMEM;
64971 - }
64972 error = 0;
64973
64974 /* Somebody else might have raced and expanded it already */
64975 - if (address > vma->vm_end) {
64976 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
64977 + error = -ENOMEM;
64978 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
64979 unsigned long size, grow;
64980
64981 size = address - vma->vm_start;
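
The rewritten expand_upwards moves the wrap-around guard ahead of any locking: rounding address + 1 up to a page boundary can only produce a smaller value if the addition wrapped past the top of the address space. The comparison in isolation (userspace sketch, illustrative only; PAGE_SIZE assumed to be 4 KiB):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long ok  = 0x40000000UL;
	unsigned long top = ~0UL;	/* last byte of the address space */

	printf("%d\n", ok  < PAGE_ALIGN(ok + 1));	/* 1: rounding up is safe */
	printf("%d\n", top < PAGE_ALIGN(top + 1));	/* 0: rounding up wrapped to 0 */
	return 0;
}
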
64982 @@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
64983 if (!error)
64984 vma->vm_end = address;
64985 }
64986 + if (locknext)
64987 + anon_vma_unlock(vma->vm_next);
64988 anon_vma_unlock(vma);
64989 return error;
64990 }
64991 @@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
64992 unsigned long address)
64993 {
64994 int error;
64995 + bool lockprev = false;
64996 + struct vm_area_struct *prev;
64997
64998 /*
64999 * We must make sure the anon_vma is allocated
65000 @@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
65001 if (error)
65002 return error;
65003
65004 + prev = vma->vm_prev;
65005 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
65006 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
65007 +#endif
65008 + if (lockprev && anon_vma_prepare(prev))
65009 + return -ENOMEM;
65010 + if (lockprev)
65011 + anon_vma_lock(prev);
65012 +
65013 anon_vma_lock(vma);
65014
65015 /*
65016 @@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
65017 */
65018
65019 /* Somebody else might have raced and expanded it already */
65020 - if (address < vma->vm_start) {
65021 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
65022 + error = -ENOMEM;
65023 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
65024 unsigned long size, grow;
65025
65026 +#ifdef CONFIG_PAX_SEGMEXEC
65027 + struct vm_area_struct *vma_m;
65028 +
65029 + vma_m = pax_find_mirror_vma(vma);
65030 +#endif
65031 +
65032 size = vma->vm_end - address;
65033 grow = (vma->vm_start - address) >> PAGE_SHIFT;
65034
65035 @@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
65036 if (!error) {
65037 vma->vm_start = address;
65038 vma->vm_pgoff -= grow;
65039 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
65040 +
65041 +#ifdef CONFIG_PAX_SEGMEXEC
65042 + if (vma_m) {
65043 + vma_m->vm_start -= grow << PAGE_SHIFT;
65044 + vma_m->vm_pgoff -= grow;
65045 + }
65046 +#endif
65047 +
65048 }
65049 }
65050 anon_vma_unlock(vma);
65051 + if (lockprev)
65052 + anon_vma_unlock(prev);
65053 return error;
65054 }
65055
65056 @@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
65057 do {
65058 long nrpages = vma_pages(vma);
65059
65060 +#ifdef CONFIG_PAX_SEGMEXEC
65061 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
65062 + vma = remove_vma(vma);
65063 + continue;
65064 + }
65065 +#endif
65066 +
65067 mm->total_vm -= nrpages;
65068 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
65069 vma = remove_vma(vma);
65070 @@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
65071 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
65072 vma->vm_prev = NULL;
65073 do {
65074 +
65075 +#ifdef CONFIG_PAX_SEGMEXEC
65076 + if (vma->vm_mirror) {
65077 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
65078 + vma->vm_mirror->vm_mirror = NULL;
65079 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
65080 + vma->vm_mirror = NULL;
65081 + }
65082 +#endif
65083 +
65084 rb_erase(&vma->vm_rb, &mm->mm_rb);
65085 mm->map_count--;
65086 tail_vma = vma;
65087 @@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
65088 struct mempolicy *pol;
65089 struct vm_area_struct *new;
65090
65091 +#ifdef CONFIG_PAX_SEGMEXEC
65092 + struct vm_area_struct *vma_m, *new_m = NULL;
65093 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
65094 +#endif
65095 +
65096 if (is_vm_hugetlb_page(vma) && (addr &
65097 ~(huge_page_mask(hstate_vma(vma)))))
65098 return -EINVAL;
65099
65100 +#ifdef CONFIG_PAX_SEGMEXEC
65101 + vma_m = pax_find_mirror_vma(vma);
65102 +
65103 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
65104 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
65105 + if (mm->map_count >= sysctl_max_map_count-1)
65106 + return -ENOMEM;
65107 + } else
65108 +#endif
65109 +
65110 if (mm->map_count >= sysctl_max_map_count)
65111 return -ENOMEM;
65112
65113 @@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
65114 if (!new)
65115 return -ENOMEM;
65116
65117 +#ifdef CONFIG_PAX_SEGMEXEC
65118 + if (vma_m) {
65119 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65120 + if (!new_m) {
65121 + kmem_cache_free(vm_area_cachep, new);
65122 + return -ENOMEM;
65123 + }
65124 + }
65125 +#endif
65126 +
65127 /* most fields are the same, copy all, and then fixup */
65128 *new = *vma;
65129
65130 @@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
65131 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
65132 }
65133
65134 +#ifdef CONFIG_PAX_SEGMEXEC
65135 + if (vma_m) {
65136 + *new_m = *vma_m;
65137 + new_m->vm_mirror = new;
65138 + new->vm_mirror = new_m;
65139 +
65140 + if (new_below)
65141 + new_m->vm_end = addr_m;
65142 + else {
65143 + new_m->vm_start = addr_m;
65144 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
65145 + }
65146 + }
65147 +#endif
65148 +
65149 pol = mpol_dup(vma_policy(vma));
65150 if (IS_ERR(pol)) {
65151 +
65152 +#ifdef CONFIG_PAX_SEGMEXEC
65153 + if (new_m)
65154 + kmem_cache_free(vm_area_cachep, new_m);
65155 +#endif
65156 +
65157 kmem_cache_free(vm_area_cachep, new);
65158 return PTR_ERR(pol);
65159 }
65160 @@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
65161 else
65162 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
65163
65164 +#ifdef CONFIG_PAX_SEGMEXEC
65165 + if (vma_m) {
65166 + mpol_get(pol);
65167 + vma_set_policy(new_m, pol);
65168 +
65169 + if (new_m->vm_file) {
65170 + get_file(new_m->vm_file);
65171 + if (vma_m->vm_flags & VM_EXECUTABLE)
65172 + added_exe_file_vma(mm);
65173 + }
65174 +
65175 + if (new_m->vm_ops && new_m->vm_ops->open)
65176 + new_m->vm_ops->open(new_m);
65177 +
65178 + if (new_below)
65179 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
65180 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
65181 + else
65182 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
65183 + }
65184 +#endif
65185 +
65186 return 0;
65187 }
65188
65189 @@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
65190 * work. This now handles partial unmappings.
65191 * Jeremy Fitzhardinge <jeremy@goop.org>
65192 */
65193 +#ifdef CONFIG_PAX_SEGMEXEC
65194 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65195 +{
65196 + int ret = __do_munmap(mm, start, len);
65197 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
65198 + return ret;
65199 +
65200 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
65201 +}
65202 +
65203 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65204 +#else
65205 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65206 +#endif
65207 {
65208 unsigned long end;
65209 struct vm_area_struct *vma, *prev, *last;
65210
65211 + /*
65212 + * mm->mmap_sem is required to protect against another thread
65213 + * changing the mappings in case we sleep.
65214 + */
65215 + verify_mm_writelocked(mm);
65216 +
65217 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
65218 return -EINVAL;
65219
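
Under SEGMEXEC the address space is split in half and executable mappings are mirrored into the upper half, so do_munmap above becomes a wrapper that repeats the unmap at start + SEGMEXEC_TASK_SIZE. The shape of that wrapper with a stand-in operation (illustrative only; the SEGMEXEC_TASK_SIZE value below is just a placeholder):

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL	/* placeholder value for illustration */

static int do_op(unsigned long start, unsigned long len)
{
	printf("operate on [%#lx, %#lx)\n", start, start + len);
	return 0;
}

static int do_op_mirrored(unsigned long start, unsigned long len)
{
	int ret = do_op(start, len);

	if (ret)
		return ret;
	/* repeat on the mirror half so both views stay in sync */
	return do_op(start + SEGMEXEC_TASK_SIZE, len);
}

int main(void)
{
	return do_op_mirrored(0x10000000UL, 0x1000UL);
}
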
65220 @@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
65221 /* Fix up all other VM information */
65222 remove_vma_list(mm, vma);
65223
65224 + track_exec_limit(mm, start, end, 0UL);
65225 +
65226 return 0;
65227 }
65228
65229 @@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
65230
65231 profile_munmap(addr);
65232
65233 +#ifdef CONFIG_PAX_SEGMEXEC
65234 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
65235 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
65236 + return -EINVAL;
65237 +#endif
65238 +
65239 down_write(&mm->mmap_sem);
65240 ret = do_munmap(mm, addr, len);
65241 up_write(&mm->mmap_sem);
65242 return ret;
65243 }
65244
65245 -static inline void verify_mm_writelocked(struct mm_struct *mm)
65246 -{
65247 -#ifdef CONFIG_DEBUG_VM
65248 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
65249 - WARN_ON(1);
65250 - up_read(&mm->mmap_sem);
65251 - }
65252 -#endif
65253 -}
65254 -
65255 /*
65256 * this is really a simplified "do_mmap". it only handles
65257 * anonymous maps. eventually we may be able to do some
65258 @@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
65259 struct rb_node ** rb_link, * rb_parent;
65260 pgoff_t pgoff = addr >> PAGE_SHIFT;
65261 int error;
65262 + unsigned long charged;
65263
65264 len = PAGE_ALIGN(len);
65265 if (!len)
65266 @@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
65267
65268 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
65269
65270 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
65271 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65272 + flags &= ~VM_EXEC;
65273 +
65274 +#ifdef CONFIG_PAX_MPROTECT
65275 + if (mm->pax_flags & MF_PAX_MPROTECT)
65276 + flags &= ~VM_MAYEXEC;
65277 +#endif
65278 +
65279 + }
65280 +#endif
65281 +
65282 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
65283 if (error & ~PAGE_MASK)
65284 return error;
65285
65286 + charged = len >> PAGE_SHIFT;
65287 +
65288 /*
65289 * mlock MCL_FUTURE?
65290 */
65291 if (mm->def_flags & VM_LOCKED) {
65292 unsigned long locked, lock_limit;
65293 - locked = len >> PAGE_SHIFT;
65294 + locked = charged;
65295 locked += mm->locked_vm;
65296 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
65297 lock_limit >>= PAGE_SHIFT;
65298 @@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
65299 /*
65300 * Clear old maps. this also does some error checking for us
65301 */
65302 - munmap_back:
65303 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65304 if (vma && vma->vm_start < addr + len) {
65305 if (do_munmap(mm, addr, len))
65306 return -ENOMEM;
65307 - goto munmap_back;
65308 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65309 + BUG_ON(vma && vma->vm_start < addr + len);
65310 }
65311
65312 /* Check against address space limits *after* clearing old maps... */
65313 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
65314 + if (!may_expand_vm(mm, charged))
65315 return -ENOMEM;
65316
65317 if (mm->map_count > sysctl_max_map_count)
65318 return -ENOMEM;
65319
65320 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
65321 + if (security_vm_enough_memory(charged))
65322 return -ENOMEM;
65323
65324 /* Can we just expand an old private anonymous mapping? */
65325 @@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
65326 */
65327 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65328 if (!vma) {
65329 - vm_unacct_memory(len >> PAGE_SHIFT);
65330 + vm_unacct_memory(charged);
65331 return -ENOMEM;
65332 }
65333
65334 @@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
65335 vma->vm_page_prot = vm_get_page_prot(flags);
65336 vma_link(mm, vma, prev, rb_link, rb_parent);
65337 out:
65338 - mm->total_vm += len >> PAGE_SHIFT;
65339 + mm->total_vm += charged;
65340 if (flags & VM_LOCKED) {
65341 if (!mlock_vma_pages_range(vma, addr, addr + len))
65342 - mm->locked_vm += (len >> PAGE_SHIFT);
65343 + mm->locked_vm += charged;
65344 }
65345 + track_exec_limit(mm, addr, addr + len, flags);
65346 return addr;
65347 }
65348
65349 @@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
65350 * Walk the list again, actually closing and freeing it,
65351 * with preemption enabled, without holding any MM locks.
65352 */
65353 - while (vma)
65354 + while (vma) {
65355 + vma->vm_mirror = NULL;
65356 vma = remove_vma(vma);
65357 + }
65358
65359 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
65360 }
65361 @@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
65362 struct vm_area_struct * __vma, * prev;
65363 struct rb_node ** rb_link, * rb_parent;
65364
65365 +#ifdef CONFIG_PAX_SEGMEXEC
65366 + struct vm_area_struct *vma_m = NULL;
65367 +#endif
65368 +
65369 /*
65370 * The vm_pgoff of a purely anonymous vma should be irrelevant
65371 * until its first write fault, when page's anon_vma and index
65372 @@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
65373 if ((vma->vm_flags & VM_ACCOUNT) &&
65374 security_vm_enough_memory_mm(mm, vma_pages(vma)))
65375 return -ENOMEM;
65376 +
65377 +#ifdef CONFIG_PAX_SEGMEXEC
65378 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
65379 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65380 + if (!vma_m)
65381 + return -ENOMEM;
65382 + }
65383 +#endif
65384 +
65385 vma_link(mm, vma, prev, rb_link, rb_parent);
65386 +
65387 +#ifdef CONFIG_PAX_SEGMEXEC
65388 + if (vma_m)
65389 + pax_mirror_vma(vma_m, vma);
65390 +#endif
65391 +
65392 return 0;
65393 }
65394
65395 @@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
65396 struct rb_node **rb_link, *rb_parent;
65397 struct mempolicy *pol;
65398
65399 + BUG_ON(vma->vm_mirror);
65400 +
65401 /*
65402 * If anonymous vma has not yet been faulted, update new pgoff
65403 * to match new location, to increase its chance of merging.
65404 @@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
65405 return new_vma;
65406 }
65407
65408 +#ifdef CONFIG_PAX_SEGMEXEC
65409 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
65410 +{
65411 + struct vm_area_struct *prev_m;
65412 + struct rb_node **rb_link_m, *rb_parent_m;
65413 + struct mempolicy *pol_m;
65414 +
65415 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
65416 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
65417 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
65418 + *vma_m = *vma;
65419 + pol_m = vma_policy(vma_m);
65420 + mpol_get(pol_m);
65421 + vma_set_policy(vma_m, pol_m);
65422 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
65423 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
65424 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
65425 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
65426 + if (vma_m->vm_file)
65427 + get_file(vma_m->vm_file);
65428 + if (vma_m->vm_ops && vma_m->vm_ops->open)
65429 + vma_m->vm_ops->open(vma_m);
65430 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
65431 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
65432 + vma_m->vm_mirror = vma;
65433 + vma->vm_mirror = vma_m;
65434 +}
65435 +#endif
65436 +
65437 /*
65438 * Return true if the calling process may expand its vm space by the passed
65439 * number of pages
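
pax_mirror_vma above is the core of the SEGMEXEC scheme: the mirror VMA covers the same pages SEGMEXEC_TASK_SIZE higher with the write bits stripped, so instruction fetches and data accesses go through different views of the same memory. A loose userspace analogy of "same pages, two views" using memfd_create (illustrative only; assumes Linux with glibc 2.27 or later, most error handling omitted):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	int fd = memfd_create("mirror-demo", 0);

	if (fd < 0 || ftruncate(fd, 4096) != 0)
		return 1;

	/* Two views of the same pages: one writable, one executable-but-not-writable. */
	char *rw = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	char *rx = mmap(NULL, 4096, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);

	strcpy(rw, "written through the RW view");
	printf("RX view sees: %s\n", rx);	/* same backing pages */

	munmap(rw, 4096);
	munmap(rx, 4096);
	close(fd);
	return 0;
}
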
65440 @@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
65441 unsigned long lim;
65442
65443 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
65444 -
65445 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
65446 if (cur + npages > lim)
65447 return 0;
65448 return 1;
65449 @@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
65450 vma->vm_start = addr;
65451 vma->vm_end = addr + len;
65452
65453 +#ifdef CONFIG_PAX_MPROTECT
65454 + if (mm->pax_flags & MF_PAX_MPROTECT) {
65455 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
65456 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
65457 + return -EPERM;
65458 + if (!(vm_flags & VM_EXEC))
65459 + vm_flags &= ~VM_MAYEXEC;
65460 +#else
65461 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
65462 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
65463 +#endif
65464 + else
65465 + vm_flags &= ~VM_MAYWRITE;
65466 + }
65467 +#endif
65468 +
65469 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
65470 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65471
65472 diff -urNp linux-2.6.32.42/mm/mprotect.c linux-2.6.32.42/mm/mprotect.c
65473 --- linux-2.6.32.42/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
65474 +++ linux-2.6.32.42/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
65475 @@ -24,10 +24,16 @@
65476 #include <linux/mmu_notifier.h>
65477 #include <linux/migrate.h>
65478 #include <linux/perf_event.h>
65479 +
65480 +#ifdef CONFIG_PAX_MPROTECT
65481 +#include <linux/elf.h>
65482 +#endif
65483 +
65484 #include <asm/uaccess.h>
65485 #include <asm/pgtable.h>
65486 #include <asm/cacheflush.h>
65487 #include <asm/tlbflush.h>
65488 +#include <asm/mmu_context.h>
65489
65490 #ifndef pgprot_modify
65491 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
65492 @@ -132,6 +138,48 @@ static void change_protection(struct vm_
65493 flush_tlb_range(vma, start, end);
65494 }
65495
65496 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65497 +/* called while holding the mmap semaphore for writing, except during stack expansion */
65498 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
65499 +{
65500 + unsigned long oldlimit, newlimit = 0UL;
65501 +
65502 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
65503 + return;
65504 +
65505 + spin_lock(&mm->page_table_lock);
65506 + oldlimit = mm->context.user_cs_limit;
65507 + if ((prot & VM_EXEC) && oldlimit < end)
65508 + /* USER_CS limit moved up */
65509 + newlimit = end;
65510 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
65511 + /* USER_CS limit moved down */
65512 + newlimit = start;
65513 +
65514 + if (newlimit) {
65515 + mm->context.user_cs_limit = newlimit;
65516 +
65517 +#ifdef CONFIG_SMP
65518 + wmb();
65519 + cpus_clear(mm->context.cpu_user_cs_mask);
65520 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
65521 +#endif
65522 +
65523 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
65524 + }
65525 + spin_unlock(&mm->page_table_lock);
65526 + if (newlimit == end) {
65527 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
65528 +
65529 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
65530 + if (is_vm_hugetlb_page(vma))
65531 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
65532 + else
65533 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
65534 + }
65535 +}
65536 +#endif
65537 +
65538 int
65539 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
65540 unsigned long start, unsigned long end, unsigned long newflags)
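
track_exec_limit above maintains mm->context.user_cs_limit, the highest address from which the segment-based PAGEEXEC emulation allows instruction fetches on x86-32 without hardware NX: mapping something executable can push the limit up, and removing exec rights below the current limit pulls it down. The update rule in isolation (userspace sketch, illustrative only; it mirrors the newlimit selection above):

#include <stdio.h>

#define VM_EXEC 0x4UL

static unsigned long user_cs_limit = 0x08048000UL;

static void update_exec_limit(unsigned long start, unsigned long end, unsigned long prot)
{
	if ((prot & VM_EXEC) && user_cs_limit < end)
		user_cs_limit = end;		/* USER_CS limit moves up */
	else if (!(prot & VM_EXEC) && start < user_cs_limit && user_cs_limit <= end)
		user_cs_limit = start;		/* USER_CS limit moves down */
}

int main(void)
{
	update_exec_limit(0x08048000UL, 0x08050000UL, VM_EXEC);
	printf("after exec mapping: %#lx\n", user_cs_limit);
	update_exec_limit(0x0804f000UL, 0x08050000UL, 0);
	printf("after exec removal: %#lx\n", user_cs_limit);
	return 0;
}
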
65541 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
65542 int error;
65543 int dirty_accountable = 0;
65544
65545 +#ifdef CONFIG_PAX_SEGMEXEC
65546 + struct vm_area_struct *vma_m = NULL;
65547 + unsigned long start_m, end_m;
65548 +
65549 + start_m = start + SEGMEXEC_TASK_SIZE;
65550 + end_m = end + SEGMEXEC_TASK_SIZE;
65551 +#endif
65552 +
65553 if (newflags == oldflags) {
65554 *pprev = vma;
65555 return 0;
65556 }
65557
65558 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
65559 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
65560 +
65561 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
65562 + return -ENOMEM;
65563 +
65564 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
65565 + return -ENOMEM;
65566 + }
65567 +
65568 /*
65569 * If we make a private mapping writable we increase our commit;
65570 * but (without finer accounting) cannot reduce our commit if we
65571 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
65572 }
65573 }
65574
65575 +#ifdef CONFIG_PAX_SEGMEXEC
65576 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
65577 + if (start != vma->vm_start) {
65578 + error = split_vma(mm, vma, start, 1);
65579 + if (error)
65580 + goto fail;
65581 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
65582 + *pprev = (*pprev)->vm_next;
65583 + }
65584 +
65585 + if (end != vma->vm_end) {
65586 + error = split_vma(mm, vma, end, 0);
65587 + if (error)
65588 + goto fail;
65589 + }
65590 +
65591 + if (pax_find_mirror_vma(vma)) {
65592 + error = __do_munmap(mm, start_m, end_m - start_m);
65593 + if (error)
65594 + goto fail;
65595 + } else {
65596 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65597 + if (!vma_m) {
65598 + error = -ENOMEM;
65599 + goto fail;
65600 + }
65601 + vma->vm_flags = newflags;
65602 + pax_mirror_vma(vma_m, vma);
65603 + }
65604 + }
65605 +#endif
65606 +
65607 /*
65608 * First try to merge with previous and/or next vma.
65609 */
65610 @@ -195,9 +293,21 @@ success:
65611 * vm_flags and vm_page_prot are protected by the mmap_sem
65612 * held in write mode.
65613 */
65614 +
65615 +#ifdef CONFIG_PAX_SEGMEXEC
65616 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
65617 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
65618 +#endif
65619 +
65620 vma->vm_flags = newflags;
65621 +
65622 +#ifdef CONFIG_PAX_MPROTECT
65623 + if (mm->binfmt && mm->binfmt->handle_mprotect)
65624 + mm->binfmt->handle_mprotect(vma, newflags);
65625 +#endif
65626 +
65627 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
65628 - vm_get_page_prot(newflags));
65629 + vm_get_page_prot(vma->vm_flags));
65630
65631 if (vma_wants_writenotify(vma)) {
65632 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
65633 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65634 end = start + len;
65635 if (end <= start)
65636 return -ENOMEM;
65637 +
65638 +#ifdef CONFIG_PAX_SEGMEXEC
65639 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
65640 + if (end > SEGMEXEC_TASK_SIZE)
65641 + return -EINVAL;
65642 + } else
65643 +#endif
65644 +
65645 + if (end > TASK_SIZE)
65646 + return -EINVAL;
65647 +
65648 if (!arch_validate_prot(prot))
65649 return -EINVAL;
65650
65651 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65652 /*
65653 * Does the application expect PROT_READ to imply PROT_EXEC:
65654 */
65655 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
65656 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
65657 prot |= PROT_EXEC;
65658
65659 vm_flags = calc_vm_prot_bits(prot);
65660 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65661 if (start > vma->vm_start)
65662 prev = vma;
65663
65664 +#ifdef CONFIG_PAX_MPROTECT
65665 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
65666 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
65667 +#endif
65668 +
65669 for (nstart = start ; ; ) {
65670 unsigned long newflags;
65671
65672 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65673
65674 /* newflags >> 4 shift VM_MAY% in place of VM_% */
65675 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
65676 + if (prot & (PROT_WRITE | PROT_EXEC))
65677 + gr_log_rwxmprotect(vma->vm_file);
65678 +
65679 + error = -EACCES;
65680 + goto out;
65681 + }
65682 +
65683 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
65684 error = -EACCES;
65685 goto out;
65686 }
65687 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65688 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
65689 if (error)
65690 goto out;
65691 +
65692 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
65693 +
65694 nstart = tmp;
65695
65696 if (nstart < prev->vm_end)
65697 diff -urNp linux-2.6.32.42/mm/mremap.c linux-2.6.32.42/mm/mremap.c
65698 --- linux-2.6.32.42/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
65699 +++ linux-2.6.32.42/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
65700 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
65701 continue;
65702 pte = ptep_clear_flush(vma, old_addr, old_pte);
65703 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
65704 +
65705 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65706 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
65707 + pte = pte_exprotect(pte);
65708 +#endif
65709 +
65710 set_pte_at(mm, new_addr, new_pte, pte);
65711 }
65712
65713 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
65714 if (is_vm_hugetlb_page(vma))
65715 goto Einval;
65716
65717 +#ifdef CONFIG_PAX_SEGMEXEC
65718 + if (pax_find_mirror_vma(vma))
65719 + goto Einval;
65720 +#endif
65721 +
65722 /* We can't remap across vm area boundaries */
65723 if (old_len > vma->vm_end - addr)
65724 goto Efault;
65725 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
65726 unsigned long ret = -EINVAL;
65727 unsigned long charged = 0;
65728 unsigned long map_flags;
65729 + unsigned long pax_task_size = TASK_SIZE;
65730
65731 if (new_addr & ~PAGE_MASK)
65732 goto out;
65733
65734 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
65735 +#ifdef CONFIG_PAX_SEGMEXEC
65736 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
65737 + pax_task_size = SEGMEXEC_TASK_SIZE;
65738 +#endif
65739 +
65740 + pax_task_size -= PAGE_SIZE;
65741 +
65742 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
65743 goto out;
65744
65745 /* Check if the location we're moving into overlaps the
65746 * old location at all, and fail if it does.
65747 */
65748 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
65749 - goto out;
65750 -
65751 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
65752 + if (addr + old_len > new_addr && new_addr + new_len > addr)
65753 goto out;
65754
65755 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
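
The hunk above also collapses the two one-sided overlap tests in mremap_to into the single symmetric form addr + old_len > new_addr && new_addr + new_len > addr. For non-zero lengths the two formulations agree, which a quick brute-force comparison confirms (userspace, illustrative only):

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>

static bool overlap_new(unsigned long addr, unsigned long old_len,
			unsigned long new_addr, unsigned long new_len)
{
	return addr + old_len > new_addr && new_addr + new_len > addr;
}

static bool overlap_old(unsigned long addr, unsigned long old_len,
			unsigned long new_addr, unsigned long new_len)
{
	return (new_addr <= addr && new_addr + new_len > addr) ||
	       (addr <= new_addr && addr + old_len > new_addr);
}

int main(void)
{
	for (unsigned long a = 0; a < 16; a++)
		for (unsigned long n = 0; n < 16; n++)
			assert(overlap_new(a, 2, n, 3) == overlap_old(a, 2, n, 3));
	puts("both forms agree for non-zero lengths");
	return 0;
}
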
65756 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
65757 struct vm_area_struct *vma;
65758 unsigned long ret = -EINVAL;
65759 unsigned long charged = 0;
65760 + unsigned long pax_task_size = TASK_SIZE;
65761
65762 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
65763 goto out;
65764 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
65765 if (!new_len)
65766 goto out;
65767
65768 +#ifdef CONFIG_PAX_SEGMEXEC
65769 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
65770 + pax_task_size = SEGMEXEC_TASK_SIZE;
65771 +#endif
65772 +
65773 + pax_task_size -= PAGE_SIZE;
65774 +
65775 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
65776 + old_len > pax_task_size || addr > pax_task_size-old_len)
65777 + goto out;
65778 +
65779 if (flags & MREMAP_FIXED) {
65780 if (flags & MREMAP_MAYMOVE)
65781 ret = mremap_to(addr, old_len, new_addr, new_len);
65782 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
65783 addr + new_len);
65784 }
65785 ret = addr;
65786 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
65787 goto out;
65788 }
65789 }
65790 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
65791 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65792 if (ret)
65793 goto out;
65794 +
65795 + map_flags = vma->vm_flags;
65796 ret = move_vma(vma, addr, old_len, new_len, new_addr);
65797 + if (!(ret & ~PAGE_MASK)) {
65798 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
65799 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
65800 + }
65801 }
65802 out:
65803 if (ret & ~PAGE_MASK)
65804 diff -urNp linux-2.6.32.42/mm/nommu.c linux-2.6.32.42/mm/nommu.c
65805 --- linux-2.6.32.42/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
65806 +++ linux-2.6.32.42/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
65807 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
65808 int sysctl_overcommit_ratio = 50; /* default is 50% */
65809 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
65810 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
65811 -int heap_stack_gap = 0;
65812
65813 atomic_long_t mmap_pages_allocated;
65814
65815 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
65816 EXPORT_SYMBOL(find_vma);
65817
65818 /*
65819 - * find a VMA
65820 - * - we don't extend stack VMAs under NOMMU conditions
65821 - */
65822 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
65823 -{
65824 - return find_vma(mm, addr);
65825 -}
65826 -
65827 -/*
65828 * expand a stack to a given address
65829 * - not supported under NOMMU conditions
65830 */
65831 diff -urNp linux-2.6.32.42/mm/page_alloc.c linux-2.6.32.42/mm/page_alloc.c
65832 --- linux-2.6.32.42/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
65833 +++ linux-2.6.32.42/mm/page_alloc.c 2011-06-25 12:56:37.000000000 -0400
65834 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
65835 int bad = 0;
65836 int wasMlocked = __TestClearPageMlocked(page);
65837
65838 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
65839 + unsigned long index = 1UL << order;
65840 +#endif
65841 +
65842 kmemcheck_free_shadow(page, order);
65843
65844 for (i = 0 ; i < (1 << order) ; ++i)
65845 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
65846 debug_check_no_obj_freed(page_address(page),
65847 PAGE_SIZE << order);
65848 }
65849 +
65850 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
65851 + for (; index; --index)
65852 + sanitize_highpage(page + index - 1);
65853 +#endif
65854 +
65855 arch_free_page(page, order);
65856 kernel_map_pages(page, 1 << order, 0);
65857
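
With CONFIG_PAX_MEMORY_SANITIZE the hunks above clear every page as it is freed back to the buddy allocator, which is why prep_new_page below can skip the __GFP_ZERO work: the pages handed out are already scrubbed. The same scrub-on-free idea in a userspace allocator wrapper (illustrative only; explicit_bzero is a glibc extension used here so the compiler cannot drop the scrub):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Scrub-on-free wrapper; the caller supplies the allocation size. */
static void xfree(void *p, size_t n)
{
	if (!p)
		return;
	explicit_bzero(p, n);	/* like sanitize_highpage() clearing a freed page */
	free(p);
}

int main(void)
{
	char *secret = malloc(32);

	if (!secret)
		return 1;
	strcpy(secret, "scrub me before reuse");
	xfree(secret, 32);
	puts("freed and scrubbed");
	return 0;
}
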
65858 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
65859 arch_alloc_page(page, order);
65860 kernel_map_pages(page, 1 << order, 1);
65861
65862 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
65863 if (gfp_flags & __GFP_ZERO)
65864 prep_zero_page(page, order, gfp_flags);
65865 +#endif
65866
65867 if (order && (gfp_flags & __GFP_COMP))
65868 prep_compound_page(page, order);
65869 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
65870 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
65871 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
65872 }
65873 +
65874 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
65875 + sanitize_highpage(page);
65876 +#endif
65877 +
65878 arch_free_page(page, 0);
65879 kernel_map_pages(page, 1, 0);
65880
65881 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
65882 int cpu;
65883 struct zone *zone;
65884
65885 + pax_track_stack();
65886 +
65887 for_each_populated_zone(zone) {
65888 show_node(zone);
65889 printk("%s per-cpu:\n", zone->name);
65890 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
65891 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
65892 }
65893 #else
65894 -static void inline setup_usemap(struct pglist_data *pgdat,
65895 +static inline void setup_usemap(struct pglist_data *pgdat,
65896 struct zone *zone, unsigned long zonesize) {}
65897 #endif /* CONFIG_SPARSEMEM */
65898
65899 diff -urNp linux-2.6.32.42/mm/percpu.c linux-2.6.32.42/mm/percpu.c
65900 --- linux-2.6.32.42/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
65901 +++ linux-2.6.32.42/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
65902 @@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
65903 static unsigned int pcpu_last_unit_cpu __read_mostly;
65904
65905 /* the address of the first chunk which starts with the kernel static area */
65906 -void *pcpu_base_addr __read_mostly;
65907 +void *pcpu_base_addr __read_only;
65908 EXPORT_SYMBOL_GPL(pcpu_base_addr);
65909
65910 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
65911 diff -urNp linux-2.6.32.42/mm/rmap.c linux-2.6.32.42/mm/rmap.c
65912 --- linux-2.6.32.42/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
65913 +++ linux-2.6.32.42/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
65914 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
65915 /* page_table_lock to protect against threads */
65916 spin_lock(&mm->page_table_lock);
65917 if (likely(!vma->anon_vma)) {
65918 +
65919 +#ifdef CONFIG_PAX_SEGMEXEC
65920 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
65921 +
65922 + if (vma_m) {
65923 + BUG_ON(vma_m->anon_vma);
65924 + vma_m->anon_vma = anon_vma;
65925 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
65926 + }
65927 +#endif
65928 +
65929 vma->anon_vma = anon_vma;
65930 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
65931 allocated = NULL;
65932 diff -urNp linux-2.6.32.42/mm/shmem.c linux-2.6.32.42/mm/shmem.c
65933 --- linux-2.6.32.42/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
65934 +++ linux-2.6.32.42/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
65935 @@ -31,7 +31,7 @@
65936 #include <linux/swap.h>
65937 #include <linux/ima.h>
65938
65939 -static struct vfsmount *shm_mnt;
65940 +struct vfsmount *shm_mnt;
65941
65942 #ifdef CONFIG_SHMEM
65943 /*
65944 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
65945 goto unlock;
65946 }
65947 entry = shmem_swp_entry(info, index, NULL);
65948 + if (!entry)
65949 + goto unlock;
65950 if (entry->val) {
65951 /*
65952 * The more uptodate page coming down from a stacked
65953 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
65954 struct vm_area_struct pvma;
65955 struct page *page;
65956
65957 + pax_track_stack();
65958 +
65959 spol = mpol_cond_copy(&mpol,
65960 mpol_shared_policy_lookup(&info->policy, idx));
65961
65962 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
65963
65964 info = SHMEM_I(inode);
65965 inode->i_size = len-1;
65966 - if (len <= (char *)inode - (char *)info) {
65967 + if (len <= (char *)inode - (char *)info && len <= 64) {
65968 /* do it inline */
65969 memcpy(info, symname, len);
65970 inode->i_op = &shmem_symlink_inline_operations;
65971 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
65972 int err = -ENOMEM;
65973
65974 /* Round up to L1_CACHE_BYTES to resist false sharing */
65975 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
65976 - L1_CACHE_BYTES), GFP_KERNEL);
65977 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
65978 if (!sbinfo)
65979 return -ENOMEM;
65980
65981 diff -urNp linux-2.6.32.42/mm/slab.c linux-2.6.32.42/mm/slab.c
65982 --- linux-2.6.32.42/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
65983 +++ linux-2.6.32.42/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
65984 @@ -174,7 +174,7 @@
65985
65986 /* Legal flag mask for kmem_cache_create(). */
65987 #if DEBUG
65988 -# define CREATE_MASK (SLAB_RED_ZONE | \
65989 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
65990 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
65991 SLAB_CACHE_DMA | \
65992 SLAB_STORE_USER | \
65993 @@ -182,7 +182,7 @@
65994 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
65995 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
65996 #else
65997 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
65998 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
65999 SLAB_CACHE_DMA | \
66000 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
66001 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66002 @@ -308,7 +308,7 @@ struct kmem_list3 {
66003 * Need this for bootstrapping a per node allocator.
66004 */
66005 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
66006 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
66007 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
66008 #define CACHE_CACHE 0
66009 #define SIZE_AC MAX_NUMNODES
66010 #define SIZE_L3 (2 * MAX_NUMNODES)
66011 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
66012 if ((x)->max_freeable < i) \
66013 (x)->max_freeable = i; \
66014 } while (0)
66015 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
66016 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
66017 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
66018 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
66019 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
66020 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
66021 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
66022 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
66023 #else
66024 #define STATS_INC_ACTIVE(x) do { } while (0)
66025 #define STATS_DEC_ACTIVE(x) do { } while (0)
66026 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
66027 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
66028 */
66029 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
66030 - const struct slab *slab, void *obj)
66031 + const struct slab *slab, const void *obj)
66032 {
66033 u32 offset = (obj - slab->s_mem);
66034 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
66035 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
66036 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
66037 sizes[INDEX_AC].cs_size,
66038 ARCH_KMALLOC_MINALIGN,
66039 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66040 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66041 NULL);
66042
66043 if (INDEX_AC != INDEX_L3) {
66044 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
66045 kmem_cache_create(names[INDEX_L3].name,
66046 sizes[INDEX_L3].cs_size,
66047 ARCH_KMALLOC_MINALIGN,
66048 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66049 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66050 NULL);
66051 }
66052
66053 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
66054 sizes->cs_cachep = kmem_cache_create(names->name,
66055 sizes->cs_size,
66056 ARCH_KMALLOC_MINALIGN,
66057 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66058 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66059 NULL);
66060 }
66061 #ifdef CONFIG_ZONE_DMA
66062 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
66063 }
66064 /* cpu stats */
66065 {
66066 - unsigned long allochit = atomic_read(&cachep->allochit);
66067 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
66068 - unsigned long freehit = atomic_read(&cachep->freehit);
66069 - unsigned long freemiss = atomic_read(&cachep->freemiss);
66070 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
66071 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
66072 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
66073 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
66074
66075 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
66076 allochit, allocmiss, freehit, freemiss);
66077 @@ -4471,15 +4471,66 @@ static const struct file_operations proc
66078
66079 static int __init slab_proc_init(void)
66080 {
66081 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
66082 + mode_t gr_mode = S_IRUGO;
66083 +
66084 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66085 + gr_mode = S_IRUSR;
66086 +#endif
66087 +
66088 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
66089 #ifdef CONFIG_DEBUG_SLAB_LEAK
66090 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
66091 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
66092 #endif
66093 return 0;
66094 }
66095 module_init(slab_proc_init);
66096 #endif
66097
66098 +void check_object_size(const void *ptr, unsigned long n, bool to)
66099 +{
66100 +
66101 +#ifdef CONFIG_PAX_USERCOPY
66102 + struct page *page;
66103 + struct kmem_cache *cachep = NULL;
66104 + struct slab *slabp;
66105 + unsigned int objnr;
66106 + unsigned long offset;
66107 +
66108 + if (!n)
66109 + return;
66110 +
66111 + if (ZERO_OR_NULL_PTR(ptr))
66112 + goto report;
66113 +
66114 + if (!virt_addr_valid(ptr))
66115 + return;
66116 +
66117 + page = virt_to_head_page(ptr);
66118 +
66119 + if (!PageSlab(page)) {
66120 + if (object_is_on_stack(ptr, n) == -1)
66121 + goto report;
66122 + return;
66123 + }
66124 +
66125 + cachep = page_get_cache(page);
66126 + if (!(cachep->flags & SLAB_USERCOPY))
66127 + goto report;
66128 +
66129 + slabp = page_get_slab(page);
66130 + objnr = obj_to_index(cachep, slabp, ptr);
66131 + BUG_ON(objnr >= cachep->num);
66132 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
66133 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
66134 + return;
66135 +
66136 +report:
66137 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
66138 +#endif
66139 +
66140 +}
66141 +EXPORT_SYMBOL(check_object_size);
66142 +
66143 /**
66144 * ksize - get the actual amount of memory allocated for a given object
66145 * @objp: Pointer to the object
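
check_object_size above is the PAX_USERCOPY hook: before a copy to or from userspace, it locates the slab object the pointer lives in and refuses the copy if the requested length runs past that object, or if the cache was not whitelisted with SLAB_USERCOPY. The core bounds test in miniature (userspace sketch, illustrative only; the struct is hypothetical):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct obj {
	char name[16];
	int id;
};

/* Allow the copy only if it stays inside the object it starts in. */
static bool copy_ok(size_t offset, size_t n)
{
	return offset <= sizeof(struct obj) && n <= sizeof(struct obj) - offset;
}

int main(void)
{
	printf("%d\n", copy_ok(0, sizeof(struct obj)));	/* 1: whole object */
	printf("%d\n", copy_ok(8, sizeof(struct obj)));	/* 0: would overrun */
	return 0;
}
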
66146 diff -urNp linux-2.6.32.42/mm/slob.c linux-2.6.32.42/mm/slob.c
66147 --- linux-2.6.32.42/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
66148 +++ linux-2.6.32.42/mm/slob.c 2011-04-17 15:56:46.000000000 -0400
66149 @@ -29,7 +29,7 @@
66150 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
66151 * alloc_pages() directly, allocating compound pages so the page order
66152 * does not have to be separately tracked, and also stores the exact
66153 - * allocation size in page->private so that it can be used to accurately
66154 + * allocation size in slob_page->size so that it can be used to accurately
66155 * provide ksize(). These objects are detected in kfree() because slob_page()
66156 * is false for them.
66157 *
66158 @@ -58,6 +58,7 @@
66159 */
66160
66161 #include <linux/kernel.h>
66162 +#include <linux/sched.h>
66163 #include <linux/slab.h>
66164 #include <linux/mm.h>
66165 #include <linux/swap.h> /* struct reclaim_state */
66166 @@ -100,7 +101,8 @@ struct slob_page {
66167 unsigned long flags; /* mandatory */
66168 atomic_t _count; /* mandatory */
66169 slobidx_t units; /* free units left in page */
66170 - unsigned long pad[2];
66171 + unsigned long pad[1];
66172 + unsigned long size; /* size when >=PAGE_SIZE */
66173 slob_t *free; /* first free slob_t in page */
66174 struct list_head list; /* linked list of free pages */
66175 };
66176 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
66177 */
66178 static inline int is_slob_page(struct slob_page *sp)
66179 {
66180 - return PageSlab((struct page *)sp);
66181 + return PageSlab((struct page *)sp) && !sp->size;
66182 }
66183
66184 static inline void set_slob_page(struct slob_page *sp)
66185 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
66186
66187 static inline struct slob_page *slob_page(const void *addr)
66188 {
66189 - return (struct slob_page *)virt_to_page(addr);
66190 + return (struct slob_page *)virt_to_head_page(addr);
66191 }
66192
66193 /*
66194 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
66195 /*
66196 * Return the size of a slob block.
66197 */
66198 -static slobidx_t slob_units(slob_t *s)
66199 +static slobidx_t slob_units(const slob_t *s)
66200 {
66201 if (s->units > 0)
66202 return s->units;
66203 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
66204 /*
66205 * Return the next free slob block pointer after this one.
66206 */
66207 -static slob_t *slob_next(slob_t *s)
66208 +static slob_t *slob_next(const slob_t *s)
66209 {
66210 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
66211 slobidx_t next;
66212 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
66213 /*
66214 * Returns true if s is the last free block in its page.
66215 */
66216 -static int slob_last(slob_t *s)
66217 +static int slob_last(const slob_t *s)
66218 {
66219 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
66220 }
66221 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
66222 if (!page)
66223 return NULL;
66224
66225 + set_slob_page(page);
66226 return page_address(page);
66227 }
66228
66229 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
66230 if (!b)
66231 return NULL;
66232 sp = slob_page(b);
66233 - set_slob_page(sp);
66234
66235 spin_lock_irqsave(&slob_lock, flags);
66236 sp->units = SLOB_UNITS(PAGE_SIZE);
66237 sp->free = b;
66238 + sp->size = 0;
66239 INIT_LIST_HEAD(&sp->list);
66240 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
66241 set_slob_page_free(sp, slob_list);
66242 @@ -475,10 +478,9 @@ out:
66243 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
66244 #endif
66245
66246 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66247 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
66248 {
66249 - unsigned int *m;
66250 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66251 + slob_t *m;
66252 void *ret;
66253
66254 lockdep_trace_alloc(gfp);
66255 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
66256
66257 if (!m)
66258 return NULL;
66259 - *m = size;
66260 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
66261 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
66262 + m[0].units = size;
66263 + m[1].units = align;
66264 ret = (void *)m + align;
66265
66266 trace_kmalloc_node(_RET_IP_, ret,
66267 @@ -501,9 +506,9 @@ void *__kmalloc_node(size_t size, gfp_t
66268
66269 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
66270 if (ret) {
66271 - struct page *page;
66272 - page = virt_to_page(ret);
66273 - page->private = size;
66274 + struct slob_page *sp;
66275 + sp = slob_page(ret);
66276 + sp->size = size;
66277 }
66278
66279 trace_kmalloc_node(_RET_IP_, ret,
66280 @@ -513,6 +518,13 @@ void *__kmalloc_node(size_t size, gfp_t
66281 kmemleak_alloc(ret, size, 1, gfp);
66282 return ret;
66283 }
66284 +
66285 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66286 +{
66287 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66288 +
66289 + return __kmalloc_node_align(size, gfp, node, align);
66290 +}
66291 EXPORT_SYMBOL(__kmalloc_node);
66292
66293 void kfree(const void *block)
66294 @@ -528,13 +540,81 @@ void kfree(const void *block)
66295 sp = slob_page(block);
66296 if (is_slob_page(sp)) {
66297 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66298 - unsigned int *m = (unsigned int *)(block - align);
66299 - slob_free(m, *m + align);
66300 - } else
66301 + slob_t *m = (slob_t *)(block - align);
66302 + slob_free(m, m[0].units + align);
66303 + } else {
66304 + clear_slob_page(sp);
66305 + free_slob_page(sp);
66306 + sp->size = 0;
66307 put_page(&sp->page);
66308 + }
66309 }
66310 EXPORT_SYMBOL(kfree);
66311
66312 +void check_object_size(const void *ptr, unsigned long n, bool to)
66313 +{
66314 +
66315 +#ifdef CONFIG_PAX_USERCOPY
66316 + struct slob_page *sp;
66317 + const slob_t *free;
66318 + const void *base;
66319 +
66320 + if (!n)
66321 + return;
66322 +
66323 + if (ZERO_OR_NULL_PTR(ptr))
66324 + goto report;
66325 +
66326 + if (!virt_addr_valid(ptr))
66327 + return;
66328 +
66329 + sp = slob_page(ptr);
66330 +	if (!PageSlab((struct page *)sp)) {
66331 + if (object_is_on_stack(ptr, n) == -1)
66332 + goto report;
66333 + return;
66334 + }
66335 +
66336 + if (sp->size) {
66337 + base = page_address(&sp->page);
66338 + if (base <= ptr && n <= sp->size - (ptr - base))
66339 + return;
66340 + goto report;
66341 + }
66342 +
66343 + /* some tricky double walking to find the chunk */
66344 + base = (void *)((unsigned long)ptr & PAGE_MASK);
66345 + free = sp->free;
66346 +
66347 + while (!slob_last(free) && (void *)free <= ptr) {
66348 + base = free + slob_units(free);
66349 + free = slob_next(free);
66350 + }
66351 +
66352 + while (base < (void *)free) {
66353 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
66354 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
66355 + int offset;
66356 +
66357 + if (ptr < base + align)
66358 + goto report;
66359 +
66360 + offset = ptr - base - align;
66361 + if (offset < m) {
66362 + if (n <= m - offset)
66363 + return;
66364 + goto report;
66365 + }
66366 + base += size;
66367 + }
66368 +
66369 +report:
66370 + pax_report_usercopy(ptr, n, to, NULL);
66371 +#endif
66372 +
66373 +}
66374 +EXPORT_SYMBOL(check_object_size);
66375 +
66376 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
66377 size_t ksize(const void *block)
66378 {
66379 @@ -547,10 +627,10 @@ size_t ksize(const void *block)
66380 sp = slob_page(block);
66381 if (is_slob_page(sp)) {
66382 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66383 - unsigned int *m = (unsigned int *)(block - align);
66384 - return SLOB_UNITS(*m) * SLOB_UNIT;
66385 + slob_t *m = (slob_t *)(block - align);
66386 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
66387 } else
66388 - return sp->page.private;
66389 + return sp->size;
66390 }
66391 EXPORT_SYMBOL(ksize);
66392
66393 @@ -605,17 +685,25 @@ void *kmem_cache_alloc_node(struct kmem_
66394 {
66395 void *b;
66396
66397 +#ifdef CONFIG_PAX_USERCOPY
66398 + b = __kmalloc_node_align(c->size, flags, node, c->align);
66399 +#else
66400 if (c->size < PAGE_SIZE) {
66401 b = slob_alloc(c->size, flags, c->align, node);
66402 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66403 SLOB_UNITS(c->size) * SLOB_UNIT,
66404 flags, node);
66405 } else {
66406 + struct slob_page *sp;
66407 +
66408 b = slob_new_pages(flags, get_order(c->size), node);
66409 + sp = slob_page(b);
66410 + sp->size = c->size;
66411 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66412 PAGE_SIZE << get_order(c->size),
66413 flags, node);
66414 }
66415 +#endif
66416
66417 if (c->ctor)
66418 c->ctor(b);
66419 @@ -627,10 +715,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
66420
66421 static void __kmem_cache_free(void *b, int size)
66422 {
66423 - if (size < PAGE_SIZE)
66424 + struct slob_page *sp = slob_page(b);
66425 +
66426 + if (is_slob_page(sp))
66427 slob_free(b, size);
66428 - else
66429 + else {
66430 + clear_slob_page(sp);
66431 + free_slob_page(sp);
66432 + sp->size = 0;
66433 slob_free_pages(b, get_order(size));
66434 + }
66435 }
66436
66437 static void kmem_rcu_free(struct rcu_head *head)
66438 @@ -643,15 +737,24 @@ static void kmem_rcu_free(struct rcu_hea
66439
66440 void kmem_cache_free(struct kmem_cache *c, void *b)
66441 {
66442 + int size = c->size;
66443 +
66444 +#ifdef CONFIG_PAX_USERCOPY
66445 + if (size + c->align < PAGE_SIZE) {
66446 + size += c->align;
66447 + b -= c->align;
66448 + }
66449 +#endif
66450 +
66451 kmemleak_free_recursive(b, c->flags);
66452 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
66453 struct slob_rcu *slob_rcu;
66454 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
66455 + slob_rcu = b + (size - sizeof(struct slob_rcu));
66456 INIT_RCU_HEAD(&slob_rcu->head);
66457 - slob_rcu->size = c->size;
66458 + slob_rcu->size = size;
66459 call_rcu(&slob_rcu->head, kmem_rcu_free);
66460 } else {
66461 - __kmem_cache_free(b, c->size);
66462 + __kmem_cache_free(b, size);
66463 }
66464
66465 trace_kmem_cache_free(_RET_IP_, b);
66466 diff -urNp linux-2.6.32.42/mm/slub.c linux-2.6.32.42/mm/slub.c
66467 --- linux-2.6.32.42/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
66468 +++ linux-2.6.32.42/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
66469 @@ -410,7 +410,7 @@ static void print_track(const char *s, s
66470 if (!t->addr)
66471 return;
66472
66473 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
66474 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
66475 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
66476 }
66477
66478 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
66479
66480 page = virt_to_head_page(x);
66481
66482 + BUG_ON(!PageSlab(page));
66483 +
66484 slab_free(s, page, x, _RET_IP_);
66485
66486 trace_kmem_cache_free(_RET_IP_, x);
66487 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
66488 * Merge control. If this is set then no merging of slab caches will occur.
66489 * (Could be removed. This was introduced to pacify the merge skeptics.)
66490 */
66491 -static int slub_nomerge;
66492 +static int slub_nomerge = 1;
66493
66494 /*
66495 * Calculate the order of allocation given an slab object size.
66496 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
66497 * list to avoid pounding the page allocator excessively.
66498 */
66499 set_min_partial(s, ilog2(s->size));
66500 - s->refcount = 1;
66501 + atomic_set(&s->refcount, 1);
66502 #ifdef CONFIG_NUMA
66503 s->remote_node_defrag_ratio = 1000;
66504 #endif
66505 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
66506 void kmem_cache_destroy(struct kmem_cache *s)
66507 {
66508 down_write(&slub_lock);
66509 - s->refcount--;
66510 - if (!s->refcount) {
66511 + if (atomic_dec_and_test(&s->refcount)) {
66512 list_del(&s->list);
66513 up_write(&slub_lock);
66514 if (kmem_cache_close(s)) {
66515 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
66516 __setup("slub_nomerge", setup_slub_nomerge);
66517
66518 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
66519 - const char *name, int size, gfp_t gfp_flags)
66520 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
66521 {
66522 - unsigned int flags = 0;
66523 -
66524 if (gfp_flags & SLUB_DMA)
66525 - flags = SLAB_CACHE_DMA;
66526 + flags |= SLAB_CACHE_DMA;
66527
66528 /*
66529 * This function is called with IRQs disabled during early-boot on
66530 @@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
66531 EXPORT_SYMBOL(__kmalloc_node);
66532 #endif
66533
66534 +void check_object_size(const void *ptr, unsigned long n, bool to)
66535 +{
66536 +
66537 +#ifdef CONFIG_PAX_USERCOPY
66538 + struct page *page;
66539 + struct kmem_cache *s = NULL;
66540 + unsigned long offset;
66541 +
66542 + if (!n)
66543 + return;
66544 +
66545 + if (ZERO_OR_NULL_PTR(ptr))
66546 + goto report;
66547 +
66548 + if (!virt_addr_valid(ptr))
66549 + return;
66550 +
66551 + page = get_object_page(ptr);
66552 +
66553 + if (!page) {
66554 + if (object_is_on_stack(ptr, n) == -1)
66555 + goto report;
66556 + return;
66557 + }
66558 +
66559 + s = page->slab;
66560 + if (!(s->flags & SLAB_USERCOPY))
66561 + goto report;
66562 +
66563 + offset = (ptr - page_address(page)) % s->size;
66564 + if (offset <= s->objsize && n <= s->objsize - offset)
66565 + return;
66566 +
66567 +report:
66568 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
66569 +#endif
66570 +
66571 +}
66572 +EXPORT_SYMBOL(check_object_size);
66573 +
66574 size_t ksize(const void *object)
66575 {
66576 struct page *page;
66577 @@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
66578 * kmem_cache_open for slab_state == DOWN.
66579 */
66580 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
66581 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
66582 - kmalloc_caches[0].refcount = -1;
66583 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
66584 + atomic_set(&kmalloc_caches[0].refcount, -1);
66585 caches++;
66586
66587 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
66588 @@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
66589 /* Caches that are not of the two-to-the-power-of size */
66590 if (KMALLOC_MIN_SIZE <= 32) {
66591 create_kmalloc_cache(&kmalloc_caches[1],
66592 - "kmalloc-96", 96, GFP_NOWAIT);
66593 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
66594 caches++;
66595 }
66596 if (KMALLOC_MIN_SIZE <= 64) {
66597 create_kmalloc_cache(&kmalloc_caches[2],
66598 - "kmalloc-192", 192, GFP_NOWAIT);
66599 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
66600 caches++;
66601 }
66602
66603 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
66604 create_kmalloc_cache(&kmalloc_caches[i],
66605 - "kmalloc", 1 << i, GFP_NOWAIT);
66606 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
66607 caches++;
66608 }
66609
66610 @@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
66611 /*
66612 * We may have set a slab to be unmergeable during bootstrap.
66613 */
66614 - if (s->refcount < 0)
66615 + if (atomic_read(&s->refcount) < 0)
66616 return 1;
66617
66618 return 0;
66619 @@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
66620 if (s) {
66621 int cpu;
66622
66623 - s->refcount++;
66624 + atomic_inc(&s->refcount);
66625 /*
66626 * Adjust the object sizes so that we clear
66627 * the complete object on kzalloc.
66628 @@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
66629
66630 if (sysfs_slab_alias(s, name)) {
66631 down_write(&slub_lock);
66632 - s->refcount--;
66633 + atomic_dec(&s->refcount);
66634 up_write(&slub_lock);
66635 goto err;
66636 }
66637 @@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
66638
66639 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
66640 {
66641 - return sprintf(buf, "%d\n", s->refcount - 1);
66642 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
66643 }
66644 SLAB_ATTR_RO(aliases);
66645
66646 @@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
66647 kfree(s);
66648 }
66649
66650 -static struct sysfs_ops slab_sysfs_ops = {
66651 +static const struct sysfs_ops slab_sysfs_ops = {
66652 .show = slab_attr_show,
66653 .store = slab_attr_store,
66654 };
66655 @@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
66656 return 0;
66657 }
66658
66659 -static struct kset_uevent_ops slab_uevent_ops = {
66660 +static const struct kset_uevent_ops slab_uevent_ops = {
66661 .filter = uevent_filter,
66662 };
66663
66664 @@ -4785,7 +4824,13 @@ static const struct file_operations proc
66665
66666 static int __init slab_proc_init(void)
66667 {
66668 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
66669 + mode_t gr_mode = S_IRUGO;
66670 +
66671 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66672 + gr_mode = S_IRUSR;
66673 +#endif
66674 +
66675 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
66676 return 0;
66677 }
66678 module_init(slab_proc_init);
66679 diff -urNp linux-2.6.32.42/mm/util.c linux-2.6.32.42/mm/util.c
66680 --- linux-2.6.32.42/mm/util.c 2011-03-27 14:31:47.000000000 -0400
66681 +++ linux-2.6.32.42/mm/util.c 2011-04-17 15:56:46.000000000 -0400
66682 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
66683 void arch_pick_mmap_layout(struct mm_struct *mm)
66684 {
66685 mm->mmap_base = TASK_UNMAPPED_BASE;
66686 +
66687 +#ifdef CONFIG_PAX_RANDMMAP
66688 + if (mm->pax_flags & MF_PAX_RANDMMAP)
66689 + mm->mmap_base += mm->delta_mmap;
66690 +#endif
66691 +
66692 mm->get_unmapped_area = arch_get_unmapped_area;
66693 mm->unmap_area = arch_unmap_area;
66694 }
66695 diff -urNp linux-2.6.32.42/mm/vmalloc.c linux-2.6.32.42/mm/vmalloc.c
66696 --- linux-2.6.32.42/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
66697 +++ linux-2.6.32.42/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
66698 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
66699
66700 pte = pte_offset_kernel(pmd, addr);
66701 do {
66702 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
66703 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
66704 +
66705 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66706 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
66707 + BUG_ON(!pte_exec(*pte));
66708 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
66709 + continue;
66710 + }
66711 +#endif
66712 +
66713 + {
66714 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
66715 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
66716 + }
66717 } while (pte++, addr += PAGE_SIZE, addr != end);
66718 }
66719
66720 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
66721 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
66722 {
66723 pte_t *pte;
66724 + int ret = -ENOMEM;
66725
66726 /*
66727 * nr is a running index into the array which helps higher level
66728 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
66729 pte = pte_alloc_kernel(pmd, addr);
66730 if (!pte)
66731 return -ENOMEM;
66732 +
66733 + pax_open_kernel();
66734 do {
66735 struct page *page = pages[*nr];
66736
66737 - if (WARN_ON(!pte_none(*pte)))
66738 - return -EBUSY;
66739 - if (WARN_ON(!page))
66740 - return -ENOMEM;
66741 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66742 + if (!(pgprot_val(prot) & _PAGE_NX))
66743 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
66744 + else
66745 +#endif
66746 +
66747 + if (WARN_ON(!pte_none(*pte))) {
66748 + ret = -EBUSY;
66749 + goto out;
66750 + }
66751 + if (WARN_ON(!page)) {
66752 + ret = -ENOMEM;
66753 + goto out;
66754 + }
66755 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
66756 (*nr)++;
66757 } while (pte++, addr += PAGE_SIZE, addr != end);
66758 - return 0;
66759 + ret = 0;
66760 +out:
66761 + pax_close_kernel();
66762 + return ret;
66763 }
66764
66765 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
66766 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
66767 * and fall back on vmalloc() if that fails. Others
66768 * just put it in the vmalloc space.
66769 */
66770 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
66771 +#ifdef CONFIG_MODULES
66772 +#ifdef MODULES_VADDR
66773 unsigned long addr = (unsigned long)x;
66774 if (addr >= MODULES_VADDR && addr < MODULES_END)
66775 return 1;
66776 #endif
66777 +
66778 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66779 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
66780 + return 1;
66781 +#endif
66782 +
66783 +#endif
66784 +
66785 return is_vmalloc_addr(x);
66786 }
66787
66788 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
66789
66790 if (!pgd_none(*pgd)) {
66791 pud_t *pud = pud_offset(pgd, addr);
66792 +#ifdef CONFIG_X86
66793 + if (!pud_large(*pud))
66794 +#endif
66795 if (!pud_none(*pud)) {
66796 pmd_t *pmd = pmd_offset(pud, addr);
66797 +#ifdef CONFIG_X86
66798 + if (!pmd_large(*pmd))
66799 +#endif
66800 if (!pmd_none(*pmd)) {
66801 pte_t *ptep, pte;
66802
66803 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
66804 struct rb_node *tmp;
66805
66806 while (*p) {
66807 - struct vmap_area *tmp;
66808 + struct vmap_area *varea;
66809
66810 parent = *p;
66811 - tmp = rb_entry(parent, struct vmap_area, rb_node);
66812 - if (va->va_start < tmp->va_end)
66813 + varea = rb_entry(parent, struct vmap_area, rb_node);
66814 + if (va->va_start < varea->va_end)
66815 p = &(*p)->rb_left;
66816 - else if (va->va_end > tmp->va_start)
66817 + else if (va->va_end > varea->va_start)
66818 p = &(*p)->rb_right;
66819 else
66820 BUG();
66821 @@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
66822 struct vm_struct *area;
66823
66824 BUG_ON(in_interrupt());
66825 +
66826 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66827 + if (flags & VM_KERNEXEC) {
66828 + if (start != VMALLOC_START || end != VMALLOC_END)
66829 + return NULL;
66830 + start = (unsigned long)MODULES_EXEC_VADDR;
66831 + end = (unsigned long)MODULES_EXEC_END;
66832 + }
66833 +#endif
66834 +
66835 if (flags & VM_IOREMAP) {
66836 int bit = fls(size);
66837
66838 @@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
66839 if (count > totalram_pages)
66840 return NULL;
66841
66842 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66843 + if (!(pgprot_val(prot) & _PAGE_NX))
66844 + flags |= VM_KERNEXEC;
66845 +#endif
66846 +
66847 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
66848 __builtin_return_address(0));
66849 if (!area)
66850 @@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
66851 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
66852 return NULL;
66853
66854 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66855 + if (!(pgprot_val(prot) & _PAGE_NX))
66856 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
66857 + node, gfp_mask, caller);
66858 + else
66859 +#endif
66860 +
66861 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
66862 VMALLOC_END, node, gfp_mask, caller);
66863
66864 @@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
66865 return addr;
66866 }
66867
66868 +#undef __vmalloc
66869 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
66870 {
66871 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
66872 @@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
66873 * For tight control over page level allocator and protection flags
66874 * use __vmalloc() instead.
66875 */
66876 +#undef vmalloc
66877 void *vmalloc(unsigned long size)
66878 {
66879 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
66880 @@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
66881 * The resulting memory area is zeroed so it can be mapped to userspace
66882 * without leaking data.
66883 */
66884 +#undef vmalloc_user
66885 void *vmalloc_user(unsigned long size)
66886 {
66887 struct vm_struct *area;
66888 @@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
66889 * For tight control over page level allocator and protection flags
66890 * use __vmalloc() instead.
66891 */
66892 +#undef vmalloc_node
66893 void *vmalloc_node(unsigned long size, int node)
66894 {
66895 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
66896 @@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
66897 * For tight control over page level allocator and protection flags
66898 * use __vmalloc() instead.
66899 */
66900 -
66901 +#undef vmalloc_exec
66902 void *vmalloc_exec(unsigned long size)
66903 {
66904 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
66905 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
66906 -1, __builtin_return_address(0));
66907 }
66908
66909 @@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
66910 * Allocate enough 32bit PA addressable pages to cover @size from the
66911 * page level allocator and map them into contiguous kernel virtual space.
66912 */
66913 +#undef vmalloc_32
66914 void *vmalloc_32(unsigned long size)
66915 {
66916 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
66917 @@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
66918 * The resulting memory area is 32bit addressable and zeroed so it can be
66919 * mapped to userspace without leaking data.
66920 */
66921 +#undef vmalloc_32_user
66922 void *vmalloc_32_user(unsigned long size)
66923 {
66924 struct vm_struct *area;
66925 @@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
66926 unsigned long uaddr = vma->vm_start;
66927 unsigned long usize = vma->vm_end - vma->vm_start;
66928
66929 + BUG_ON(vma->vm_mirror);
66930 +
66931 if ((PAGE_SIZE-1) & (unsigned long)addr)
66932 return -EINVAL;
66933
66934 diff -urNp linux-2.6.32.42/mm/vmstat.c linux-2.6.32.42/mm/vmstat.c
66935 --- linux-2.6.32.42/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
66936 +++ linux-2.6.32.42/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
66937 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
66938 *
66939 * vm_stat contains the global counters
66940 */
66941 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66942 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66943 EXPORT_SYMBOL(vm_stat);
66944
66945 #ifdef CONFIG_SMP
66946 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
66947 v = p->vm_stat_diff[i];
66948 p->vm_stat_diff[i] = 0;
66949 local_irq_restore(flags);
66950 - atomic_long_add(v, &zone->vm_stat[i]);
66951 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
66952 global_diff[i] += v;
66953 #ifdef CONFIG_NUMA
66954 /* 3 seconds idle till flush */
66955 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
66956
66957 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
66958 if (global_diff[i])
66959 - atomic_long_add(global_diff[i], &vm_stat[i]);
66960 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
66961 }
66962
66963 #endif
66964 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
66965 start_cpu_timer(cpu);
66966 #endif
66967 #ifdef CONFIG_PROC_FS
66968 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
66969 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
66970 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
66971 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
66972 + {
66973 + mode_t gr_mode = S_IRUGO;
66974 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66975 + gr_mode = S_IRUSR;
66976 +#endif
66977 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
66978 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
66979 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
66980 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
66981 +#else
66982 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
66983 +#endif
66984 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
66985 + }
66986 #endif
66987 return 0;
66988 }
66989 diff -urNp linux-2.6.32.42/net/8021q/vlan.c linux-2.6.32.42/net/8021q/vlan.c
66990 --- linux-2.6.32.42/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
66991 +++ linux-2.6.32.42/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
66992 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
66993 err = -EPERM;
66994 if (!capable(CAP_NET_ADMIN))
66995 break;
66996 - if ((args.u.name_type >= 0) &&
66997 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
66998 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
66999 struct vlan_net *vn;
67000
67001 vn = net_generic(net, vlan_net_id);
67002 diff -urNp linux-2.6.32.42/net/atm/atm_misc.c linux-2.6.32.42/net/atm/atm_misc.c
67003 --- linux-2.6.32.42/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
67004 +++ linux-2.6.32.42/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
67005 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
67006 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
67007 return 1;
67008 atm_return(vcc,truesize);
67009 - atomic_inc(&vcc->stats->rx_drop);
67010 + atomic_inc_unchecked(&vcc->stats->rx_drop);
67011 return 0;
67012 }
67013
67014 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
67015 }
67016 }
67017 atm_return(vcc,guess);
67018 - atomic_inc(&vcc->stats->rx_drop);
67019 + atomic_inc_unchecked(&vcc->stats->rx_drop);
67020 return NULL;
67021 }
67022
67023 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
67024
67025 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
67026 {
67027 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67028 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67029 __SONET_ITEMS
67030 #undef __HANDLE_ITEM
67031 }
67032 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
67033
67034 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
67035 {
67036 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
67037 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
67038 __SONET_ITEMS
67039 #undef __HANDLE_ITEM
67040 }
67041 diff -urNp linux-2.6.32.42/net/atm/mpoa_caches.c linux-2.6.32.42/net/atm/mpoa_caches.c
67042 --- linux-2.6.32.42/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
67043 +++ linux-2.6.32.42/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
67044 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
67045 struct timeval now;
67046 struct k_message msg;
67047
67048 + pax_track_stack();
67049 +
67050 do_gettimeofday(&now);
67051
67052 write_lock_irq(&client->egress_lock);
67053 diff -urNp linux-2.6.32.42/net/atm/proc.c linux-2.6.32.42/net/atm/proc.c
67054 --- linux-2.6.32.42/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
67055 +++ linux-2.6.32.42/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
67056 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
67057 const struct k_atm_aal_stats *stats)
67058 {
67059 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
67060 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
67061 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
67062 - atomic_read(&stats->rx_drop));
67063 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
67064 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
67065 + atomic_read_unchecked(&stats->rx_drop));
67066 }
67067
67068 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
67069 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
67070 {
67071 struct sock *sk = sk_atm(vcc);
67072
67073 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67074 + seq_printf(seq, "%p ", NULL);
67075 +#else
67076 seq_printf(seq, "%p ", vcc);
67077 +#endif
67078 +
67079 if (!vcc->dev)
67080 seq_printf(seq, "Unassigned ");
67081 else
67082 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
67083 {
67084 if (!vcc->dev)
67085 seq_printf(seq, sizeof(void *) == 4 ?
67086 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67087 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
67088 +#else
67089 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
67090 +#endif
67091 else
67092 seq_printf(seq, "%3d %3d %5d ",
67093 vcc->dev->number, vcc->vpi, vcc->vci);
67094 diff -urNp linux-2.6.32.42/net/atm/resources.c linux-2.6.32.42/net/atm/resources.c
67095 --- linux-2.6.32.42/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
67096 +++ linux-2.6.32.42/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
67097 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
67098 static void copy_aal_stats(struct k_atm_aal_stats *from,
67099 struct atm_aal_stats *to)
67100 {
67101 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67102 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67103 __AAL_STAT_ITEMS
67104 #undef __HANDLE_ITEM
67105 }
67106 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
67107 static void subtract_aal_stats(struct k_atm_aal_stats *from,
67108 struct atm_aal_stats *to)
67109 {
67110 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
67111 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
67112 __AAL_STAT_ITEMS
67113 #undef __HANDLE_ITEM
67114 }
67115 diff -urNp linux-2.6.32.42/net/bluetooth/l2cap.c linux-2.6.32.42/net/bluetooth/l2cap.c
67116 --- linux-2.6.32.42/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
67117 +++ linux-2.6.32.42/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
67118 @@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
67119 err = -ENOTCONN;
67120 break;
67121 }
67122 -
67123 + memset(&cinfo, 0, sizeof(cinfo));
67124 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
67125 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
67126
67127 @@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
67128
67129 /* Reject if config buffer is too small. */
67130 len = cmd_len - sizeof(*req);
67131 - if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
67132 + if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
67133 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
67134 l2cap_build_conf_rsp(sk, rsp,
67135 L2CAP_CONF_REJECT, flags), rsp);
67136 diff -urNp linux-2.6.32.42/net/bluetooth/rfcomm/sock.c linux-2.6.32.42/net/bluetooth/rfcomm/sock.c
67137 --- linux-2.6.32.42/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
67138 +++ linux-2.6.32.42/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
67139 @@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
67140
67141 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
67142
67143 + memset(&cinfo, 0, sizeof(cinfo));
67144 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
67145 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
67146
67147 diff -urNp linux-2.6.32.42/net/bridge/br_private.h linux-2.6.32.42/net/bridge/br_private.h
67148 --- linux-2.6.32.42/net/bridge/br_private.h 2011-03-27 14:31:47.000000000 -0400
67149 +++ linux-2.6.32.42/net/bridge/br_private.h 2011-04-17 15:56:46.000000000 -0400
67150 @@ -254,7 +254,7 @@ extern void br_ifinfo_notify(int event,
67151
67152 #ifdef CONFIG_SYSFS
67153 /* br_sysfs_if.c */
67154 -extern struct sysfs_ops brport_sysfs_ops;
67155 +extern const struct sysfs_ops brport_sysfs_ops;
67156 extern int br_sysfs_addif(struct net_bridge_port *p);
67157
67158 /* br_sysfs_br.c */
67159 diff -urNp linux-2.6.32.42/net/bridge/br_stp_if.c linux-2.6.32.42/net/bridge/br_stp_if.c
67160 --- linux-2.6.32.42/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
67161 +++ linux-2.6.32.42/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
67162 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
67163 char *envp[] = { NULL };
67164
67165 if (br->stp_enabled == BR_USER_STP) {
67166 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
67167 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
67168 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
67169 br->dev->name, r);
67170
67171 diff -urNp linux-2.6.32.42/net/bridge/br_sysfs_if.c linux-2.6.32.42/net/bridge/br_sysfs_if.c
67172 --- linux-2.6.32.42/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
67173 +++ linux-2.6.32.42/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
67174 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
67175 return ret;
67176 }
67177
67178 -struct sysfs_ops brport_sysfs_ops = {
67179 +const struct sysfs_ops brport_sysfs_ops = {
67180 .show = brport_show,
67181 .store = brport_store,
67182 };
67183 diff -urNp linux-2.6.32.42/net/bridge/netfilter/ebtables.c linux-2.6.32.42/net/bridge/netfilter/ebtables.c
67184 --- linux-2.6.32.42/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
67185 +++ linux-2.6.32.42/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
67186 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
67187 unsigned int entries_size, nentries;
67188 char *entries;
67189
67190 + pax_track_stack();
67191 +
67192 if (cmd == EBT_SO_GET_ENTRIES) {
67193 entries_size = t->private->entries_size;
67194 nentries = t->private->nentries;
67195 diff -urNp linux-2.6.32.42/net/can/bcm.c linux-2.6.32.42/net/can/bcm.c
67196 --- linux-2.6.32.42/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
67197 +++ linux-2.6.32.42/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
67198 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
67199 struct bcm_sock *bo = bcm_sk(sk);
67200 struct bcm_op *op;
67201
67202 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67203 + seq_printf(m, ">>> socket %p", NULL);
67204 + seq_printf(m, " / sk %p", NULL);
67205 + seq_printf(m, " / bo %p", NULL);
67206 +#else
67207 seq_printf(m, ">>> socket %p", sk->sk_socket);
67208 seq_printf(m, " / sk %p", sk);
67209 seq_printf(m, " / bo %p", bo);
67210 +#endif
67211 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
67212 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
67213 seq_printf(m, " <<<\n");
67214 diff -urNp linux-2.6.32.42/net/core/dev.c linux-2.6.32.42/net/core/dev.c
67215 --- linux-2.6.32.42/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
67216 +++ linux-2.6.32.42/net/core/dev.c 2011-04-17 17:04:18.000000000 -0400
67217 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
67218 if (no_module && capable(CAP_NET_ADMIN))
67219 no_module = request_module("netdev-%s", name);
67220 if (no_module && capable(CAP_SYS_MODULE)) {
67221 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67222 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
67223 +#else
67224 if (!request_module("%s", name))
67225 pr_err("Loading kernel module for a network device "
67226 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
67227 "instead\n", name);
67228 +#endif
67229 }
67230 }
67231 EXPORT_SYMBOL(dev_load);
67232 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
67233 }
67234 EXPORT_SYMBOL(netif_rx_ni);
67235
67236 -static void net_tx_action(struct softirq_action *h)
67237 +static void net_tx_action(void)
67238 {
67239 struct softnet_data *sd = &__get_cpu_var(softnet_data);
67240
67241 @@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
67242 EXPORT_SYMBOL(netif_napi_del);
67243
67244
67245 -static void net_rx_action(struct softirq_action *h)
67246 +static void net_rx_action(void)
67247 {
67248 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
67249 unsigned long time_limit = jiffies + 2;
67250 diff -urNp linux-2.6.32.42/net/core/flow.c linux-2.6.32.42/net/core/flow.c
67251 --- linux-2.6.32.42/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
67252 +++ linux-2.6.32.42/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
67253 @@ -35,11 +35,11 @@ struct flow_cache_entry {
67254 atomic_t *object_ref;
67255 };
67256
67257 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
67258 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
67259
67260 static u32 flow_hash_shift;
67261 #define flow_hash_size (1 << flow_hash_shift)
67262 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
67263 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
67264
67265 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
67266
67267 @@ -52,7 +52,7 @@ struct flow_percpu_info {
67268 u32 hash_rnd;
67269 int count;
67270 };
67271 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
67272 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
67273
67274 #define flow_hash_rnd_recalc(cpu) \
67275 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
67276 @@ -69,7 +69,7 @@ struct flow_flush_info {
67277 atomic_t cpuleft;
67278 struct completion completion;
67279 };
67280 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
67281 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
67282
67283 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
67284
67285 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
67286 if (fle->family == family &&
67287 fle->dir == dir &&
67288 flow_key_compare(key, &fle->key) == 0) {
67289 - if (fle->genid == atomic_read(&flow_cache_genid)) {
67290 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
67291 void *ret = fle->object;
67292
67293 if (ret)
67294 @@ -228,7 +228,7 @@ nocache:
67295 err = resolver(net, key, family, dir, &obj, &obj_ref);
67296
67297 if (fle && !err) {
67298 - fle->genid = atomic_read(&flow_cache_genid);
67299 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
67300
67301 if (fle->object)
67302 atomic_dec(fle->object_ref);
67303 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
67304
67305 fle = flow_table(cpu)[i];
67306 for (; fle; fle = fle->next) {
67307 - unsigned genid = atomic_read(&flow_cache_genid);
67308 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
67309
67310 if (!fle->object || fle->genid == genid)
67311 continue;
67312 diff -urNp linux-2.6.32.42/net/core/skbuff.c linux-2.6.32.42/net/core/skbuff.c
67313 --- linux-2.6.32.42/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
67314 +++ linux-2.6.32.42/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
67315 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
67316 struct sk_buff *frag_iter;
67317 struct sock *sk = skb->sk;
67318
67319 + pax_track_stack();
67320 +
67321 /*
67322 * __skb_splice_bits() only fails if the output has no room left,
67323 * so no point in going over the frag_list for the error case.
67324 diff -urNp linux-2.6.32.42/net/core/sock.c linux-2.6.32.42/net/core/sock.c
67325 --- linux-2.6.32.42/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
67326 +++ linux-2.6.32.42/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
67327 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
67328 break;
67329
67330 case SO_PEERCRED:
67331 + {
67332 + struct ucred peercred;
67333 if (len > sizeof(sk->sk_peercred))
67334 len = sizeof(sk->sk_peercred);
67335 - if (copy_to_user(optval, &sk->sk_peercred, len))
67336 + peercred = sk->sk_peercred;
67337 + if (copy_to_user(optval, &peercred, len))
67338 return -EFAULT;
67339 goto lenout;
67340 + }
67341
67342 case SO_PEERNAME:
67343 {
67344 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
67345 */
67346 smp_wmb();
67347 atomic_set(&sk->sk_refcnt, 1);
67348 - atomic_set(&sk->sk_drops, 0);
67349 + atomic_set_unchecked(&sk->sk_drops, 0);
67350 }
67351 EXPORT_SYMBOL(sock_init_data);
67352
67353 diff -urNp linux-2.6.32.42/net/decnet/sysctl_net_decnet.c linux-2.6.32.42/net/decnet/sysctl_net_decnet.c
67354 --- linux-2.6.32.42/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
67355 +++ linux-2.6.32.42/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
67356 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
67357
67358 if (len > *lenp) len = *lenp;
67359
67360 - if (copy_to_user(buffer, addr, len))
67361 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
67362 return -EFAULT;
67363
67364 *lenp = len;
67365 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
67366
67367 if (len > *lenp) len = *lenp;
67368
67369 - if (copy_to_user(buffer, devname, len))
67370 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
67371 return -EFAULT;
67372
67373 *lenp = len;
67374 diff -urNp linux-2.6.32.42/net/econet/Kconfig linux-2.6.32.42/net/econet/Kconfig
67375 --- linux-2.6.32.42/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
67376 +++ linux-2.6.32.42/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
67377 @@ -4,7 +4,7 @@
67378
67379 config ECONET
67380 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
67381 - depends on EXPERIMENTAL && INET
67382 + depends on EXPERIMENTAL && INET && BROKEN
67383 ---help---
67384 Econet is a fairly old and slow networking protocol mainly used by
67385 Acorn computers to access file and print servers. It uses native
67386 diff -urNp linux-2.6.32.42/net/ieee802154/dgram.c linux-2.6.32.42/net/ieee802154/dgram.c
67387 --- linux-2.6.32.42/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
67388 +++ linux-2.6.32.42/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
67389 @@ -318,7 +318,7 @@ out:
67390 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
67391 {
67392 if (sock_queue_rcv_skb(sk, skb) < 0) {
67393 - atomic_inc(&sk->sk_drops);
67394 + atomic_inc_unchecked(&sk->sk_drops);
67395 kfree_skb(skb);
67396 return NET_RX_DROP;
67397 }
67398 diff -urNp linux-2.6.32.42/net/ieee802154/raw.c linux-2.6.32.42/net/ieee802154/raw.c
67399 --- linux-2.6.32.42/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
67400 +++ linux-2.6.32.42/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
67401 @@ -206,7 +206,7 @@ out:
67402 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
67403 {
67404 if (sock_queue_rcv_skb(sk, skb) < 0) {
67405 - atomic_inc(&sk->sk_drops);
67406 + atomic_inc_unchecked(&sk->sk_drops);
67407 kfree_skb(skb);
67408 return NET_RX_DROP;
67409 }
67410 diff -urNp linux-2.6.32.42/net/ipv4/inet_diag.c linux-2.6.32.42/net/ipv4/inet_diag.c
67411 --- linux-2.6.32.42/net/ipv4/inet_diag.c 2011-04-17 17:00:52.000000000 -0400
67412 +++ linux-2.6.32.42/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
67413 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
67414 r->idiag_retrans = 0;
67415
67416 r->id.idiag_if = sk->sk_bound_dev_if;
67417 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67418 + r->id.idiag_cookie[0] = 0;
67419 + r->id.idiag_cookie[1] = 0;
67420 +#else
67421 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
67422 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
67423 +#endif
67424
67425 r->id.idiag_sport = inet->sport;
67426 r->id.idiag_dport = inet->dport;
67427 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
67428 r->idiag_family = tw->tw_family;
67429 r->idiag_retrans = 0;
67430 r->id.idiag_if = tw->tw_bound_dev_if;
67431 +
67432 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67433 + r->id.idiag_cookie[0] = 0;
67434 + r->id.idiag_cookie[1] = 0;
67435 +#else
67436 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
67437 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
67438 +#endif
67439 +
67440 r->id.idiag_sport = tw->tw_sport;
67441 r->id.idiag_dport = tw->tw_dport;
67442 r->id.idiag_src[0] = tw->tw_rcv_saddr;
67443 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
67444 if (sk == NULL)
67445 goto unlock;
67446
67447 +#ifndef CONFIG_GRKERNSEC_HIDESYM
67448 err = -ESTALE;
67449 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
67450 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
67451 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
67452 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
67453 goto out;
67454 +#endif
67455
67456 err = -ENOMEM;
67457 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
67458 @@ -436,7 +450,7 @@ static int valid_cc(const void *bc, int
67459 return 0;
67460 if (cc == len)
67461 return 1;
67462 - if (op->yes < 4)
67463 + if (op->yes < 4 || op->yes & 3)
67464 return 0;
67465 len -= op->yes;
67466 bc += op->yes;
67467 @@ -446,11 +460,11 @@ static int valid_cc(const void *bc, int
67468
67469 static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
67470 {
67471 - const unsigned char *bc = bytecode;
67472 + const void *bc = bytecode;
67473 int len = bytecode_len;
67474
67475 while (len > 0) {
67476 - struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;
67477 + const struct inet_diag_bc_op *op = bc;
67478
67479 //printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
67480 switch (op->code) {
67481 @@ -461,22 +475,20 @@ static int inet_diag_bc_audit(const void
67482 case INET_DIAG_BC_S_LE:
67483 case INET_DIAG_BC_D_GE:
67484 case INET_DIAG_BC_D_LE:
67485 - if (op->yes < 4 || op->yes > len + 4)
67486 - return -EINVAL;
67487 case INET_DIAG_BC_JMP:
67488 - if (op->no < 4 || op->no > len + 4)
67489 + if (op->no < 4 || op->no > len + 4 || op->no & 3)
67490 return -EINVAL;
67491 if (op->no < len &&
67492 !valid_cc(bytecode, bytecode_len, len - op->no))
67493 return -EINVAL;
67494 break;
67495 case INET_DIAG_BC_NOP:
67496 - if (op->yes < 4 || op->yes > len + 4)
67497 - return -EINVAL;
67498 break;
67499 default:
67500 return -EINVAL;
67501 }
67502 + if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
67503 + return -EINVAL;
67504 bc += op->yes;
67505 len -= op->yes;
67506 }
67507 @@ -581,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
67508 r->idiag_retrans = req->retrans;
67509
67510 r->id.idiag_if = sk->sk_bound_dev_if;
67511 +
67512 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67513 + r->id.idiag_cookie[0] = 0;
67514 + r->id.idiag_cookie[1] = 0;
67515 +#else
67516 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
67517 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
67518 +#endif
67519
67520 tmo = req->expires - jiffies;
67521 if (tmo < 0)
67522 diff -urNp linux-2.6.32.42/net/ipv4/inet_hashtables.c linux-2.6.32.42/net/ipv4/inet_hashtables.c
67523 --- linux-2.6.32.42/net/ipv4/inet_hashtables.c 2011-03-27 14:31:47.000000000 -0400
67524 +++ linux-2.6.32.42/net/ipv4/inet_hashtables.c 2011-04-17 15:56:46.000000000 -0400
67525 @@ -18,11 +18,14 @@
67526 #include <linux/sched.h>
67527 #include <linux/slab.h>
67528 #include <linux/wait.h>
67529 +#include <linux/security.h>
67530
67531 #include <net/inet_connection_sock.h>
67532 #include <net/inet_hashtables.h>
67533 #include <net/ip.h>
67534
67535 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
67536 +
67537 /*
67538 * Allocate and initialize a new local port bind bucket.
67539 * The bindhash mutex for snum's hash chain must be held here.
67540 @@ -490,6 +493,8 @@ ok:
67541 }
67542 spin_unlock(&head->lock);
67543
67544 + gr_update_task_in_ip_table(current, inet_sk(sk));
67545 +
67546 if (tw) {
67547 inet_twsk_deschedule(tw, death_row);
67548 inet_twsk_put(tw);
67549 diff -urNp linux-2.6.32.42/net/ipv4/inetpeer.c linux-2.6.32.42/net/ipv4/inetpeer.c
67550 --- linux-2.6.32.42/net/ipv4/inetpeer.c 2011-03-27 14:31:47.000000000 -0400
67551 +++ linux-2.6.32.42/net/ipv4/inetpeer.c 2011-05-16 21:46:57.000000000 -0400
67552 @@ -366,6 +366,8 @@ struct inet_peer *inet_getpeer(__be32 da
67553 struct inet_peer *p, *n;
67554 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
67555
67556 + pax_track_stack();
67557 +
67558 /* Look up for the address quickly. */
67559 read_lock_bh(&peer_pool_lock);
67560 p = lookup(daddr, NULL);
67561 @@ -389,7 +391,7 @@ struct inet_peer *inet_getpeer(__be32 da
67562 return NULL;
67563 n->v4daddr = daddr;
67564 atomic_set(&n->refcnt, 1);
67565 - atomic_set(&n->rid, 0);
67566 + atomic_set_unchecked(&n->rid, 0);
67567 n->ip_id_count = secure_ip_id(daddr);
67568 n->tcp_ts_stamp = 0;
67569
67570 diff -urNp linux-2.6.32.42/net/ipv4/ip_fragment.c linux-2.6.32.42/net/ipv4/ip_fragment.c
67571 --- linux-2.6.32.42/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
67572 +++ linux-2.6.32.42/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
67573 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
67574 return 0;
67575
67576 start = qp->rid;
67577 - end = atomic_inc_return(&peer->rid);
67578 + end = atomic_inc_return_unchecked(&peer->rid);
67579 qp->rid = end;
67580
67581 rc = qp->q.fragments && (end - start) > max;
67582 diff -urNp linux-2.6.32.42/net/ipv4/ip_sockglue.c linux-2.6.32.42/net/ipv4/ip_sockglue.c
67583 --- linux-2.6.32.42/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
67584 +++ linux-2.6.32.42/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
67585 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
67586 int val;
67587 int len;
67588
67589 + pax_track_stack();
67590 +
67591 if (level != SOL_IP)
67592 return -EOPNOTSUPP;
67593
67594 diff -urNp linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c
67595 --- linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
67596 +++ linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
67597 @@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
67598 private = &tmp;
67599 }
67600 #endif
67601 + memset(&info, 0, sizeof(info));
67602 info.valid_hooks = t->valid_hooks;
67603 memcpy(info.hook_entry, private->hook_entry,
67604 sizeof(info.hook_entry));
67605 diff -urNp linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c
67606 --- linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
67607 +++ linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
67608 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
67609 private = &tmp;
67610 }
67611 #endif
67612 + memset(&info, 0, sizeof(info));
67613 info.valid_hooks = t->valid_hooks;
67614 memcpy(info.hook_entry, private->hook_entry,
67615 sizeof(info.hook_entry));
67616 diff -urNp linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c
67617 --- linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
67618 +++ linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
67619 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
67620
67621 *len = 0;
67622
67623 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
67624 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
67625 if (*octets == NULL) {
67626 if (net_ratelimit())
67627 printk("OOM in bsalg (%d)\n", __LINE__);
67628 diff -urNp linux-2.6.32.42/net/ipv4/raw.c linux-2.6.32.42/net/ipv4/raw.c
67629 --- linux-2.6.32.42/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
67630 +++ linux-2.6.32.42/net/ipv4/raw.c 2011-05-04 17:59:08.000000000 -0400
67631 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
67632 /* Charge it to the socket. */
67633
67634 if (sock_queue_rcv_skb(sk, skb) < 0) {
67635 - atomic_inc(&sk->sk_drops);
67636 + atomic_inc_unchecked(&sk->sk_drops);
67637 kfree_skb(skb);
67638 return NET_RX_DROP;
67639 }
67640 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
67641 int raw_rcv(struct sock *sk, struct sk_buff *skb)
67642 {
67643 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
67644 - atomic_inc(&sk->sk_drops);
67645 + atomic_inc_unchecked(&sk->sk_drops);
67646 kfree_skb(skb);
67647 return NET_RX_DROP;
67648 }
67649 @@ -724,15 +724,22 @@ static int raw_init(struct sock *sk)
67650
67651 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
67652 {
67653 + struct icmp_filter filter;
67654 +
67655 + if (optlen < 0)
67656 + return -EINVAL;
67657 if (optlen > sizeof(struct icmp_filter))
67658 optlen = sizeof(struct icmp_filter);
67659 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
67660 + if (copy_from_user(&filter, optval, optlen))
67661 return -EFAULT;
67662 + memcpy(&raw_sk(sk)->filter, &filter, optlen);
67663 +
67664 return 0;
67665 }
67666
67667 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
67668 {
67669 + struct icmp_filter filter;
67670 int len, ret = -EFAULT;
67671
67672 if (get_user(len, optlen))
67673 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
67674 if (len > sizeof(struct icmp_filter))
67675 len = sizeof(struct icmp_filter);
67676 ret = -EFAULT;
67677 + memcpy(&filter, &raw_sk(sk)->filter, len);
67678 if (put_user(len, optlen) ||
67679 - copy_to_user(optval, &raw_sk(sk)->filter, len))
67680 + copy_to_user(optval, &filter, len))
67681 goto out;
67682 ret = 0;
67683 out: return ret;
67684 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
67685 sk_wmem_alloc_get(sp),
67686 sk_rmem_alloc_get(sp),
67687 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
67688 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
67689 + atomic_read(&sp->sk_refcnt),
67690 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67691 + NULL,
67692 +#else
67693 + sp,
67694 +#endif
67695 + atomic_read_unchecked(&sp->sk_drops));
67696 }
67697
67698 static int raw_seq_show(struct seq_file *seq, void *v)
67699 diff -urNp linux-2.6.32.42/net/ipv4/route.c linux-2.6.32.42/net/ipv4/route.c
67700 --- linux-2.6.32.42/net/ipv4/route.c 2011-03-27 14:31:47.000000000 -0400
67701 +++ linux-2.6.32.42/net/ipv4/route.c 2011-05-04 17:56:28.000000000 -0400
67702 @@ -268,7 +268,7 @@ static inline unsigned int rt_hash(__be3
67703
67704 static inline int rt_genid(struct net *net)
67705 {
67706 - return atomic_read(&net->ipv4.rt_genid);
67707 + return atomic_read_unchecked(&net->ipv4.rt_genid);
67708 }
67709
67710 #ifdef CONFIG_PROC_FS
67711 @@ -888,7 +888,7 @@ static void rt_cache_invalidate(struct n
67712 unsigned char shuffle;
67713
67714 get_random_bytes(&shuffle, sizeof(shuffle));
67715 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
67716 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
67717 }
67718
67719 /*
67720 @@ -3356,7 +3356,7 @@ static __net_initdata struct pernet_oper
67721
67722 static __net_init int rt_secret_timer_init(struct net *net)
67723 {
67724 - atomic_set(&net->ipv4.rt_genid,
67725 + atomic_set_unchecked(&net->ipv4.rt_genid,
67726 (int) ((num_physpages ^ (num_physpages>>8)) ^
67727 (jiffies ^ (jiffies >> 7))));
67728
67729 diff -urNp linux-2.6.32.42/net/ipv4/tcp.c linux-2.6.32.42/net/ipv4/tcp.c
67730 --- linux-2.6.32.42/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
67731 +++ linux-2.6.32.42/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
67732 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
67733 int val;
67734 int err = 0;
67735
67736 + pax_track_stack();
67737 +
67738 /* This is a string value all the others are int's */
67739 if (optname == TCP_CONGESTION) {
67740 char name[TCP_CA_NAME_MAX];
67741 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
67742 struct tcp_sock *tp = tcp_sk(sk);
67743 int val, len;
67744
67745 + pax_track_stack();
67746 +
67747 if (get_user(len, optlen))
67748 return -EFAULT;
67749
67750 diff -urNp linux-2.6.32.42/net/ipv4/tcp_ipv4.c linux-2.6.32.42/net/ipv4/tcp_ipv4.c
67751 --- linux-2.6.32.42/net/ipv4/tcp_ipv4.c 2011-03-27 14:31:47.000000000 -0400
67752 +++ linux-2.6.32.42/net/ipv4/tcp_ipv4.c 2011-04-17 15:56:46.000000000 -0400
67753 @@ -84,6 +84,9 @@
67754 int sysctl_tcp_tw_reuse __read_mostly;
67755 int sysctl_tcp_low_latency __read_mostly;
67756
67757 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67758 +extern int grsec_enable_blackhole;
67759 +#endif
67760
67761 #ifdef CONFIG_TCP_MD5SIG
67762 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
67763 @@ -1542,6 +1545,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
67764 return 0;
67765
67766 reset:
67767 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67768 + if (!grsec_enable_blackhole)
67769 +#endif
67770 tcp_v4_send_reset(rsk, skb);
67771 discard:
67772 kfree_skb(skb);
67773 @@ -1603,12 +1609,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
67774 TCP_SKB_CB(skb)->sacked = 0;
67775
67776 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
67777 - if (!sk)
67778 + if (!sk) {
67779 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67780 + ret = 1;
67781 +#endif
67782 goto no_tcp_socket;
67783 + }
67784
67785 process:
67786 - if (sk->sk_state == TCP_TIME_WAIT)
67787 + if (sk->sk_state == TCP_TIME_WAIT) {
67788 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67789 + ret = 2;
67790 +#endif
67791 goto do_time_wait;
67792 + }
67793
67794 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
67795 goto discard_and_relse;
67796 @@ -1650,6 +1664,10 @@ no_tcp_socket:
67797 bad_packet:
67798 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
67799 } else {
67800 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67801 + if (!grsec_enable_blackhole || (ret == 1 &&
67802 + (skb->dev->flags & IFF_LOOPBACK)))
67803 +#endif
67804 tcp_v4_send_reset(NULL, skb);
67805 }
67806
67807 @@ -2237,7 +2255,11 @@ static void get_openreq4(struct sock *sk
67808 0, /* non standard timer */
67809 0, /* open_requests have no inode */
67810 atomic_read(&sk->sk_refcnt),
67811 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67812 + NULL,
67813 +#else
67814 req,
67815 +#endif
67816 len);
67817 }
67818
67819 @@ -2279,7 +2301,12 @@ static void get_tcp4_sock(struct sock *s
67820 sock_i_uid(sk),
67821 icsk->icsk_probes_out,
67822 sock_i_ino(sk),
67823 - atomic_read(&sk->sk_refcnt), sk,
67824 + atomic_read(&sk->sk_refcnt),
67825 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67826 + NULL,
67827 +#else
67828 + sk,
67829 +#endif
67830 jiffies_to_clock_t(icsk->icsk_rto),
67831 jiffies_to_clock_t(icsk->icsk_ack.ato),
67832 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
67833 @@ -2307,7 +2334,13 @@ static void get_timewait4_sock(struct in
67834 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
67835 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
67836 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
67837 - atomic_read(&tw->tw_refcnt), tw, len);
67838 + atomic_read(&tw->tw_refcnt),
67839 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67840 + NULL,
67841 +#else
67842 + tw,
67843 +#endif
67844 + len);
67845 }
67846
67847 #define TMPSZ 150
67848 diff -urNp linux-2.6.32.42/net/ipv4/tcp_minisocks.c linux-2.6.32.42/net/ipv4/tcp_minisocks.c
67849 --- linux-2.6.32.42/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
67850 +++ linux-2.6.32.42/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
67851 @@ -26,6 +26,10 @@
67852 #include <net/inet_common.h>
67853 #include <net/xfrm.h>
67854
67855 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67856 +extern int grsec_enable_blackhole;
67857 +#endif
67858 +
67859 #ifdef CONFIG_SYSCTL
67860 #define SYNC_INIT 0 /* let the user enable it */
67861 #else
67862 @@ -672,6 +676,10 @@ listen_overflow:
67863
67864 embryonic_reset:
67865 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
67866 +
67867 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67868 + if (!grsec_enable_blackhole)
67869 +#endif
67870 if (!(flg & TCP_FLAG_RST))
67871 req->rsk_ops->send_reset(sk, skb);
67872
67873 diff -urNp linux-2.6.32.42/net/ipv4/tcp_output.c linux-2.6.32.42/net/ipv4/tcp_output.c
67874 --- linux-2.6.32.42/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
67875 +++ linux-2.6.32.42/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
67876 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
67877 __u8 *md5_hash_location;
67878 int mss;
67879
67880 + pax_track_stack();
67881 +
67882 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
67883 if (skb == NULL)
67884 return NULL;
67885 diff -urNp linux-2.6.32.42/net/ipv4/tcp_probe.c linux-2.6.32.42/net/ipv4/tcp_probe.c
67886 --- linux-2.6.32.42/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
67887 +++ linux-2.6.32.42/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
67888 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
67889 if (cnt + width >= len)
67890 break;
67891
67892 - if (copy_to_user(buf + cnt, tbuf, width))
67893 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
67894 return -EFAULT;
67895 cnt += width;
67896 }
67897 diff -urNp linux-2.6.32.42/net/ipv4/tcp_timer.c linux-2.6.32.42/net/ipv4/tcp_timer.c
67898 --- linux-2.6.32.42/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
67899 +++ linux-2.6.32.42/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
67900 @@ -21,6 +21,10 @@
67901 #include <linux/module.h>
67902 #include <net/tcp.h>
67903
67904 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67905 +extern int grsec_lastack_retries;
67906 +#endif
67907 +
67908 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
67909 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
67910 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
67911 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
67912 }
67913 }
67914
67915 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67916 + if ((sk->sk_state == TCP_LAST_ACK) &&
67917 + (grsec_lastack_retries > 0) &&
67918 + (grsec_lastack_retries < retry_until))
67919 + retry_until = grsec_lastack_retries;
67920 +#endif
67921 +
67922 if (retransmits_timed_out(sk, retry_until)) {
67923 /* Has it gone just too far? */
67924 tcp_write_err(sk);
67925 diff -urNp linux-2.6.32.42/net/ipv4/udp.c linux-2.6.32.42/net/ipv4/udp.c
67926 --- linux-2.6.32.42/net/ipv4/udp.c 2011-03-27 14:31:47.000000000 -0400
67927 +++ linux-2.6.32.42/net/ipv4/udp.c 2011-05-04 17:57:28.000000000 -0400
67928 @@ -86,6 +86,7 @@
67929 #include <linux/types.h>
67930 #include <linux/fcntl.h>
67931 #include <linux/module.h>
67932 +#include <linux/security.h>
67933 #include <linux/socket.h>
67934 #include <linux/sockios.h>
67935 #include <linux/igmp.h>
67936 @@ -106,6 +107,10 @@
67937 #include <net/xfrm.h>
67938 #include "udp_impl.h"
67939
67940 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67941 +extern int grsec_enable_blackhole;
67942 +#endif
67943 +
67944 struct udp_table udp_table;
67945 EXPORT_SYMBOL(udp_table);
67946
67947 @@ -371,6 +376,9 @@ found:
67948 return s;
67949 }
67950
67951 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
67952 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
67953 +
67954 /*
67955 * This routine is called by the ICMP module when it gets some
67956 * sort of error condition. If err < 0 then the socket should
67957 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
67958 dport = usin->sin_port;
67959 if (dport == 0)
67960 return -EINVAL;
67961 +
67962 + err = gr_search_udp_sendmsg(sk, usin);
67963 + if (err)
67964 + return err;
67965 } else {
67966 if (sk->sk_state != TCP_ESTABLISHED)
67967 return -EDESTADDRREQ;
67968 +
67969 + err = gr_search_udp_sendmsg(sk, NULL);
67970 + if (err)
67971 + return err;
67972 +
67973 daddr = inet->daddr;
67974 dport = inet->dport;
67975 /* Open fast path for connected socket.
67976 @@ -945,6 +962,10 @@ try_again:
67977 if (!skb)
67978 goto out;
67979
67980 + err = gr_search_udp_recvmsg(sk, skb);
67981 + if (err)
67982 + goto out_free;
67983 +
67984 ulen = skb->len - sizeof(struct udphdr);
67985 copied = len;
67986 if (copied > ulen)
67987 @@ -1065,7 +1086,7 @@ static int __udp_queue_rcv_skb(struct so
67988 if (rc == -ENOMEM) {
67989 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
67990 is_udplite);
67991 - atomic_inc(&sk->sk_drops);
67992 + atomic_inc_unchecked(&sk->sk_drops);
67993 }
67994 goto drop;
67995 }
67996 @@ -1335,6 +1356,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
67997 goto csum_error;
67998
67999 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
68000 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68001 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68002 +#endif
68003 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
68004
68005 /*
68006 @@ -1755,8 +1779,13 @@ static void udp4_format_sock(struct sock
68007 sk_wmem_alloc_get(sp),
68008 sk_rmem_alloc_get(sp),
68009 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68010 - atomic_read(&sp->sk_refcnt), sp,
68011 - atomic_read(&sp->sk_drops), len);
68012 + atomic_read(&sp->sk_refcnt),
68013 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68014 + NULL,
68015 +#else
68016 + sp,
68017 +#endif
68018 + atomic_read_unchecked(&sp->sk_drops), len);
68019 }
68020
68021 int udp4_seq_show(struct seq_file *seq, void *v)
68022 diff -urNp linux-2.6.32.42/net/ipv6/inet6_connection_sock.c linux-2.6.32.42/net/ipv6/inet6_connection_sock.c
68023 --- linux-2.6.32.42/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
68024 +++ linux-2.6.32.42/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
68025 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
68026 #ifdef CONFIG_XFRM
68027 {
68028 struct rt6_info *rt = (struct rt6_info *)dst;
68029 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
68030 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
68031 }
68032 #endif
68033 }
68034 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
68035 #ifdef CONFIG_XFRM
68036 if (dst) {
68037 struct rt6_info *rt = (struct rt6_info *)dst;
68038 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
68039 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
68040 sk->sk_dst_cache = NULL;
68041 dst_release(dst);
68042 dst = NULL;
68043 diff -urNp linux-2.6.32.42/net/ipv6/inet6_hashtables.c linux-2.6.32.42/net/ipv6/inet6_hashtables.c
68044 --- linux-2.6.32.42/net/ipv6/inet6_hashtables.c 2011-03-27 14:31:47.000000000 -0400
68045 +++ linux-2.6.32.42/net/ipv6/inet6_hashtables.c 2011-05-04 17:56:28.000000000 -0400
68046 @@ -118,7 +118,7 @@ out:
68047 }
68048 EXPORT_SYMBOL(__inet6_lookup_established);
68049
68050 -static int inline compute_score(struct sock *sk, struct net *net,
68051 +static inline int compute_score(struct sock *sk, struct net *net,
68052 const unsigned short hnum,
68053 const struct in6_addr *daddr,
68054 const int dif)
68055 diff -urNp linux-2.6.32.42/net/ipv6/ipv6_sockglue.c linux-2.6.32.42/net/ipv6/ipv6_sockglue.c
68056 --- linux-2.6.32.42/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
68057 +++ linux-2.6.32.42/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
68058 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
68059 int val, valbool;
68060 int retv = -ENOPROTOOPT;
68061
68062 + pax_track_stack();
68063 +
68064 if (optval == NULL)
68065 val=0;
68066 else {
68067 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
68068 int len;
68069 int val;
68070
68071 + pax_track_stack();
68072 +
68073 if (ip6_mroute_opt(optname))
68074 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
68075
68076 diff -urNp linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c
68077 --- linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
68078 +++ linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
68079 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
68080 private = &tmp;
68081 }
68082 #endif
68083 + memset(&info, 0, sizeof(info));
68084 info.valid_hooks = t->valid_hooks;
68085 memcpy(info.hook_entry, private->hook_entry,
68086 sizeof(info.hook_entry));
68087 diff -urNp linux-2.6.32.42/net/ipv6/raw.c linux-2.6.32.42/net/ipv6/raw.c
68088 --- linux-2.6.32.42/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
68089 +++ linux-2.6.32.42/net/ipv6/raw.c 2011-05-16 21:46:57.000000000 -0400
68090 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
68091 {
68092 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
68093 skb_checksum_complete(skb)) {
68094 - atomic_inc(&sk->sk_drops);
68095 + atomic_inc_unchecked(&sk->sk_drops);
68096 kfree_skb(skb);
68097 return NET_RX_DROP;
68098 }
68099
68100 /* Charge it to the socket. */
68101 if (sock_queue_rcv_skb(sk,skb)<0) {
68102 - atomic_inc(&sk->sk_drops);
68103 + atomic_inc_unchecked(&sk->sk_drops);
68104 kfree_skb(skb);
68105 return NET_RX_DROP;
68106 }
68107 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68108 struct raw6_sock *rp = raw6_sk(sk);
68109
68110 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
68111 - atomic_inc(&sk->sk_drops);
68112 + atomic_inc_unchecked(&sk->sk_drops);
68113 kfree_skb(skb);
68114 return NET_RX_DROP;
68115 }
68116 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68117
68118 if (inet->hdrincl) {
68119 if (skb_checksum_complete(skb)) {
68120 - atomic_inc(&sk->sk_drops);
68121 + atomic_inc_unchecked(&sk->sk_drops);
68122 kfree_skb(skb);
68123 return NET_RX_DROP;
68124 }
68125 @@ -518,7 +518,7 @@ csum_copy_err:
68126 as some normal condition.
68127 */
68128 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
68129 - atomic_inc(&sk->sk_drops);
68130 + atomic_inc_unchecked(&sk->sk_drops);
68131 goto out;
68132 }
68133
68134 @@ -600,7 +600,7 @@ out:
68135 return err;
68136 }
68137
68138 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
68139 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
68140 struct flowi *fl, struct rt6_info *rt,
68141 unsigned int flags)
68142 {
68143 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
68144 u16 proto;
68145 int err;
68146
68147 + pax_track_stack();
68148 +
68149 /* Rough check on arithmetic overflow,
68150 better check is made in ip6_append_data().
68151 */
68152 @@ -916,12 +918,17 @@ do_confirm:
68153 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
68154 char __user *optval, int optlen)
68155 {
68156 + struct icmp6_filter filter;
68157 +
68158 switch (optname) {
68159 case ICMPV6_FILTER:
68160 + if (optlen < 0)
68161 + return -EINVAL;
68162 if (optlen > sizeof(struct icmp6_filter))
68163 optlen = sizeof(struct icmp6_filter);
68164 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
68165 + if (copy_from_user(&filter, optval, optlen))
68166 return -EFAULT;
68167 + memcpy(&raw6_sk(sk)->filter, &filter, optlen);
68168 return 0;
68169 default:
68170 return -ENOPROTOOPT;
68171 @@ -933,6 +940,7 @@ static int rawv6_seticmpfilter(struct so
68172 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
68173 char __user *optval, int __user *optlen)
68174 {
68175 + struct icmp6_filter filter;
68176 int len;
68177
68178 switch (optname) {
68179 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
68180 len = sizeof(struct icmp6_filter);
68181 if (put_user(len, optlen))
68182 return -EFAULT;
68183 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
68184 + memcpy(&filter, &raw6_sk(sk)->filter, len);
68185 + if (copy_to_user(optval, &filter, len))
68186 return -EFAULT;
68187 return 0;
68188 default:
68189 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
68190 0, 0L, 0,
68191 sock_i_uid(sp), 0,
68192 sock_i_ino(sp),
68193 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68194 + atomic_read(&sp->sk_refcnt),
68195 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68196 + NULL,
68197 +#else
68198 + sp,
68199 +#endif
68200 + atomic_read_unchecked(&sp->sk_drops));
68201 }
68202
68203 static int raw6_seq_show(struct seq_file *seq, void *v)
68204 diff -urNp linux-2.6.32.42/net/ipv6/tcp_ipv6.c linux-2.6.32.42/net/ipv6/tcp_ipv6.c
68205 --- linux-2.6.32.42/net/ipv6/tcp_ipv6.c 2011-03-27 14:31:47.000000000 -0400
68206 +++ linux-2.6.32.42/net/ipv6/tcp_ipv6.c 2011-04-17 15:56:46.000000000 -0400
68207 @@ -88,6 +88,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
68208 }
68209 #endif
68210
68211 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68212 +extern int grsec_enable_blackhole;
68213 +#endif
68214 +
68215 static void tcp_v6_hash(struct sock *sk)
68216 {
68217 if (sk->sk_state != TCP_CLOSE) {
68218 @@ -1578,6 +1582,9 @@ static int tcp_v6_do_rcv(struct sock *sk
68219 return 0;
68220
68221 reset:
68222 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68223 + if (!grsec_enable_blackhole)
68224 +#endif
68225 tcp_v6_send_reset(sk, skb);
68226 discard:
68227 if (opt_skb)
68228 @@ -1655,12 +1662,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
68229 TCP_SKB_CB(skb)->sacked = 0;
68230
68231 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68232 - if (!sk)
68233 + if (!sk) {
68234 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68235 + ret = 1;
68236 +#endif
68237 goto no_tcp_socket;
68238 + }
68239
68240 process:
68241 - if (sk->sk_state == TCP_TIME_WAIT)
68242 + if (sk->sk_state == TCP_TIME_WAIT) {
68243 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68244 + ret = 2;
68245 +#endif
68246 goto do_time_wait;
68247 + }
68248
68249 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
68250 goto discard_and_relse;
68251 @@ -1700,6 +1715,10 @@ no_tcp_socket:
68252 bad_packet:
68253 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
68254 } else {
68255 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68256 + if (!grsec_enable_blackhole || (ret == 1 &&
68257 + (skb->dev->flags & IFF_LOOPBACK)))
68258 +#endif
68259 tcp_v6_send_reset(NULL, skb);
68260 }
68261
68262 @@ -1915,7 +1934,13 @@ static void get_openreq6(struct seq_file
68263 uid,
68264 0, /* non standard timer */
68265 0, /* open_requests have no inode */
68266 - 0, req);
68267 + 0,
68268 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68269 + NULL
68270 +#else
68271 + req
68272 +#endif
68273 + );
68274 }
68275
68276 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
68277 @@ -1965,7 +1990,12 @@ static void get_tcp6_sock(struct seq_fil
68278 sock_i_uid(sp),
68279 icsk->icsk_probes_out,
68280 sock_i_ino(sp),
68281 - atomic_read(&sp->sk_refcnt), sp,
68282 + atomic_read(&sp->sk_refcnt),
68283 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68284 + NULL,
68285 +#else
68286 + sp,
68287 +#endif
68288 jiffies_to_clock_t(icsk->icsk_rto),
68289 jiffies_to_clock_t(icsk->icsk_ack.ato),
68290 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
68291 @@ -2000,7 +2030,13 @@ static void get_timewait6_sock(struct se
68292 dest->s6_addr32[2], dest->s6_addr32[3], destp,
68293 tw->tw_substate, 0, 0,
68294 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68295 - atomic_read(&tw->tw_refcnt), tw);
68296 + atomic_read(&tw->tw_refcnt),
68297 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68298 + NULL
68299 +#else
68300 + tw
68301 +#endif
68302 + );
68303 }
68304
68305 static int tcp6_seq_show(struct seq_file *seq, void *v)
68306 diff -urNp linux-2.6.32.42/net/ipv6/udp.c linux-2.6.32.42/net/ipv6/udp.c
68307 --- linux-2.6.32.42/net/ipv6/udp.c 2011-03-27 14:31:47.000000000 -0400
68308 +++ linux-2.6.32.42/net/ipv6/udp.c 2011-05-04 17:58:16.000000000 -0400
68309 @@ -49,6 +49,10 @@
68310 #include <linux/seq_file.h>
68311 #include "udp_impl.h"
68312
68313 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68314 +extern int grsec_enable_blackhole;
68315 +#endif
68316 +
68317 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
68318 {
68319 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
68320 @@ -388,7 +392,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
68321 if (rc == -ENOMEM) {
68322 UDP6_INC_STATS_BH(sock_net(sk),
68323 UDP_MIB_RCVBUFERRORS, is_udplite);
68324 - atomic_inc(&sk->sk_drops);
68325 + atomic_inc_unchecked(&sk->sk_drops);
68326 }
68327 goto drop;
68328 }
68329 @@ -587,6 +591,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
68330 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
68331 proto == IPPROTO_UDPLITE);
68332
68333 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68334 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68335 +#endif
68336 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
68337
68338 kfree_skb(skb);
68339 @@ -1206,8 +1213,13 @@ static void udp6_sock_seq_show(struct se
68340 0, 0L, 0,
68341 sock_i_uid(sp), 0,
68342 sock_i_ino(sp),
68343 - atomic_read(&sp->sk_refcnt), sp,
68344 - atomic_read(&sp->sk_drops));
68345 + atomic_read(&sp->sk_refcnt),
68346 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68347 + NULL,
68348 +#else
68349 + sp,
68350 +#endif
68351 + atomic_read_unchecked(&sp->sk_drops));
68352 }
68353
68354 int udp6_seq_show(struct seq_file *seq, void *v)
68355 diff -urNp linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c
68356 --- linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
68357 +++ linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
68358 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
68359 add_wait_queue(&self->open_wait, &wait);
68360
68361 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
68362 - __FILE__,__LINE__, tty->driver->name, self->open_count );
68363 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68364
68365 /* As far as I can see, we protect open_count - Jean II */
68366 spin_lock_irqsave(&self->spinlock, flags);
68367 if (!tty_hung_up_p(filp)) {
68368 extra_count = 1;
68369 - self->open_count--;
68370 + local_dec(&self->open_count);
68371 }
68372 spin_unlock_irqrestore(&self->spinlock, flags);
68373 - self->blocked_open++;
68374 + local_inc(&self->blocked_open);
68375
68376 while (1) {
68377 if (tty->termios->c_cflag & CBAUD) {
68378 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
68379 }
68380
68381 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
68382 - __FILE__,__LINE__, tty->driver->name, self->open_count );
68383 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68384
68385 schedule();
68386 }
68387 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
68388 if (extra_count) {
68389 /* ++ is not atomic, so this should be protected - Jean II */
68390 spin_lock_irqsave(&self->spinlock, flags);
68391 - self->open_count++;
68392 + local_inc(&self->open_count);
68393 spin_unlock_irqrestore(&self->spinlock, flags);
68394 }
68395 - self->blocked_open--;
68396 + local_dec(&self->blocked_open);
68397
68398 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
68399 - __FILE__,__LINE__, tty->driver->name, self->open_count);
68400 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
68401
68402 if (!retval)
68403 self->flags |= ASYNC_NORMAL_ACTIVE;
68404 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
68405 }
68406 /* ++ is not atomic, so this should be protected - Jean II */
68407 spin_lock_irqsave(&self->spinlock, flags);
68408 - self->open_count++;
68409 + local_inc(&self->open_count);
68410
68411 tty->driver_data = self;
68412 self->tty = tty;
68413 spin_unlock_irqrestore(&self->spinlock, flags);
68414
68415 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
68416 - self->line, self->open_count);
68417 + self->line, local_read(&self->open_count));
68418
68419 /* Not really used by us, but lets do it anyway */
68420 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
68421 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
68422 return;
68423 }
68424
68425 - if ((tty->count == 1) && (self->open_count != 1)) {
68426 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
68427 /*
68428 * Uh, oh. tty->count is 1, which means that the tty
68429 * structure will be freed. state->count should always
68430 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
68431 */
68432 IRDA_DEBUG(0, "%s(), bad serial port count; "
68433 "tty->count is 1, state->count is %d\n", __func__ ,
68434 - self->open_count);
68435 - self->open_count = 1;
68436 + local_read(&self->open_count));
68437 + local_set(&self->open_count, 1);
68438 }
68439
68440 - if (--self->open_count < 0) {
68441 + if (local_dec_return(&self->open_count) < 0) {
68442 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
68443 - __func__, self->line, self->open_count);
68444 - self->open_count = 0;
68445 + __func__, self->line, local_read(&self->open_count));
68446 + local_set(&self->open_count, 0);
68447 }
68448 - if (self->open_count) {
68449 + if (local_read(&self->open_count)) {
68450 spin_unlock_irqrestore(&self->spinlock, flags);
68451
68452 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
68453 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
68454 tty->closing = 0;
68455 self->tty = NULL;
68456
68457 - if (self->blocked_open) {
68458 + if (local_read(&self->blocked_open)) {
68459 if (self->close_delay)
68460 schedule_timeout_interruptible(self->close_delay);
68461 wake_up_interruptible(&self->open_wait);
68462 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
68463 spin_lock_irqsave(&self->spinlock, flags);
68464 self->flags &= ~ASYNC_NORMAL_ACTIVE;
68465 self->tty = NULL;
68466 - self->open_count = 0;
68467 + local_set(&self->open_count, 0);
68468 spin_unlock_irqrestore(&self->spinlock, flags);
68469
68470 wake_up_interruptible(&self->open_wait);
68471 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
68472 seq_putc(m, '\n');
68473
68474 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
68475 - seq_printf(m, "Open count: %d\n", self->open_count);
68476 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
68477 seq_printf(m, "Max data size: %d\n", self->max_data_size);
68478 seq_printf(m, "Max header size: %d\n", self->max_header_size);
68479
68480 diff -urNp linux-2.6.32.42/net/iucv/af_iucv.c linux-2.6.32.42/net/iucv/af_iucv.c
68481 --- linux-2.6.32.42/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
68482 +++ linux-2.6.32.42/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
68483 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
68484
68485 write_lock_bh(&iucv_sk_list.lock);
68486
68487 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
68488 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
68489 while (__iucv_get_sock_by_name(name)) {
68490 sprintf(name, "%08x",
68491 - atomic_inc_return(&iucv_sk_list.autobind_name));
68492 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
68493 }
68494
68495 write_unlock_bh(&iucv_sk_list.lock);
68496 diff -urNp linux-2.6.32.42/net/key/af_key.c linux-2.6.32.42/net/key/af_key.c
68497 --- linux-2.6.32.42/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
68498 +++ linux-2.6.32.42/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
68499 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
68500 struct xfrm_migrate m[XFRM_MAX_DEPTH];
68501 struct xfrm_kmaddress k;
68502
68503 + pax_track_stack();
68504 +
68505 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
68506 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
68507 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
68508 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
68509 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
68510 else
68511 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
68512 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68513 + NULL,
68514 +#else
68515 s,
68516 +#endif
68517 atomic_read(&s->sk_refcnt),
68518 sk_rmem_alloc_get(s),
68519 sk_wmem_alloc_get(s),
68520 diff -urNp linux-2.6.32.42/net/mac80211/cfg.c linux-2.6.32.42/net/mac80211/cfg.c
68521 --- linux-2.6.32.42/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
68522 +++ linux-2.6.32.42/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
68523 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
68524 return err;
68525 }
68526
68527 -struct cfg80211_ops mac80211_config_ops = {
68528 +const struct cfg80211_ops mac80211_config_ops = {
68529 .add_virtual_intf = ieee80211_add_iface,
68530 .del_virtual_intf = ieee80211_del_iface,
68531 .change_virtual_intf = ieee80211_change_iface,
68532 diff -urNp linux-2.6.32.42/net/mac80211/cfg.h linux-2.6.32.42/net/mac80211/cfg.h
68533 --- linux-2.6.32.42/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
68534 +++ linux-2.6.32.42/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
68535 @@ -4,6 +4,6 @@
68536 #ifndef __CFG_H
68537 #define __CFG_H
68538
68539 -extern struct cfg80211_ops mac80211_config_ops;
68540 +extern const struct cfg80211_ops mac80211_config_ops;
68541
68542 #endif /* __CFG_H */
68543 diff -urNp linux-2.6.32.42/net/mac80211/debugfs_key.c linux-2.6.32.42/net/mac80211/debugfs_key.c
68544 --- linux-2.6.32.42/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
68545 +++ linux-2.6.32.42/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
68546 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
68547 size_t count, loff_t *ppos)
68548 {
68549 struct ieee80211_key *key = file->private_data;
68550 - int i, res, bufsize = 2 * key->conf.keylen + 2;
68551 + int i, bufsize = 2 * key->conf.keylen + 2;
68552 char *buf = kmalloc(bufsize, GFP_KERNEL);
68553 char *p = buf;
68554 + ssize_t res;
68555 +
68556 + if (buf == NULL)
68557 + return -ENOMEM;
68558
68559 for (i = 0; i < key->conf.keylen; i++)
68560 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
68561 diff -urNp linux-2.6.32.42/net/mac80211/debugfs_sta.c linux-2.6.32.42/net/mac80211/debugfs_sta.c
68562 --- linux-2.6.32.42/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
68563 +++ linux-2.6.32.42/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
68564 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
68565 int i;
68566 struct sta_info *sta = file->private_data;
68567
68568 + pax_track_stack();
68569 +
68570 spin_lock_bh(&sta->lock);
68571 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
68572 sta->ampdu_mlme.dialog_token_allocator + 1);
68573 diff -urNp linux-2.6.32.42/net/mac80211/ieee80211_i.h linux-2.6.32.42/net/mac80211/ieee80211_i.h
68574 --- linux-2.6.32.42/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
68575 +++ linux-2.6.32.42/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
68576 @@ -25,6 +25,7 @@
68577 #include <linux/etherdevice.h>
68578 #include <net/cfg80211.h>
68579 #include <net/mac80211.h>
68580 +#include <asm/local.h>
68581 #include "key.h"
68582 #include "sta_info.h"
68583
68584 @@ -635,7 +636,7 @@ struct ieee80211_local {
68585 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
68586 spinlock_t queue_stop_reason_lock;
68587
68588 - int open_count;
68589 + local_t open_count;
68590 int monitors, cooked_mntrs;
68591 /* number of interfaces with corresponding FIF_ flags */
68592 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
68593 diff -urNp linux-2.6.32.42/net/mac80211/iface.c linux-2.6.32.42/net/mac80211/iface.c
68594 --- linux-2.6.32.42/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
68595 +++ linux-2.6.32.42/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
68596 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
68597 break;
68598 }
68599
68600 - if (local->open_count == 0) {
68601 + if (local_read(&local->open_count) == 0) {
68602 res = drv_start(local);
68603 if (res)
68604 goto err_del_bss;
68605 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
68606 * Validate the MAC address for this device.
68607 */
68608 if (!is_valid_ether_addr(dev->dev_addr)) {
68609 - if (!local->open_count)
68610 + if (!local_read(&local->open_count))
68611 drv_stop(local);
68612 return -EADDRNOTAVAIL;
68613 }
68614 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
68615
68616 hw_reconf_flags |= __ieee80211_recalc_idle(local);
68617
68618 - local->open_count++;
68619 + local_inc(&local->open_count);
68620 if (hw_reconf_flags) {
68621 ieee80211_hw_config(local, hw_reconf_flags);
68622 /*
68623 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
68624 err_del_interface:
68625 drv_remove_interface(local, &conf);
68626 err_stop:
68627 - if (!local->open_count)
68628 + if (!local_read(&local->open_count))
68629 drv_stop(local);
68630 err_del_bss:
68631 sdata->bss = NULL;
68632 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
68633 WARN_ON(!list_empty(&sdata->u.ap.vlans));
68634 }
68635
68636 - local->open_count--;
68637 + local_dec(&local->open_count);
68638
68639 switch (sdata->vif.type) {
68640 case NL80211_IFTYPE_AP_VLAN:
68641 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
68642
68643 ieee80211_recalc_ps(local, -1);
68644
68645 - if (local->open_count == 0) {
68646 + if (local_read(&local->open_count) == 0) {
68647 ieee80211_clear_tx_pending(local);
68648 ieee80211_stop_device(local);
68649
68650 diff -urNp linux-2.6.32.42/net/mac80211/main.c linux-2.6.32.42/net/mac80211/main.c
68651 --- linux-2.6.32.42/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
68652 +++ linux-2.6.32.42/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
68653 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
68654 local->hw.conf.power_level = power;
68655 }
68656
68657 - if (changed && local->open_count) {
68658 + if (changed && local_read(&local->open_count)) {
68659 ret = drv_config(local, changed);
68660 /*
68661 * Goal:
68662 diff -urNp linux-2.6.32.42/net/mac80211/mlme.c linux-2.6.32.42/net/mac80211/mlme.c
68663 --- linux-2.6.32.42/net/mac80211/mlme.c 2011-03-27 14:31:47.000000000 -0400
68664 +++ linux-2.6.32.42/net/mac80211/mlme.c 2011-05-16 21:46:57.000000000 -0400
68665 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
68666 bool have_higher_than_11mbit = false, newsta = false;
68667 u16 ap_ht_cap_flags;
68668
68669 + pax_track_stack();
68670 +
68671 /*
68672 * AssocResp and ReassocResp have identical structure, so process both
68673 * of them in this function.
68674 diff -urNp linux-2.6.32.42/net/mac80211/pm.c linux-2.6.32.42/net/mac80211/pm.c
68675 --- linux-2.6.32.42/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
68676 +++ linux-2.6.32.42/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
68677 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
68678 }
68679
68680 /* stop hardware - this must stop RX */
68681 - if (local->open_count)
68682 + if (local_read(&local->open_count))
68683 ieee80211_stop_device(local);
68684
68685 local->suspended = true;
68686 diff -urNp linux-2.6.32.42/net/mac80211/rate.c linux-2.6.32.42/net/mac80211/rate.c
68687 --- linux-2.6.32.42/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
68688 +++ linux-2.6.32.42/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
68689 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
68690 struct rate_control_ref *ref, *old;
68691
68692 ASSERT_RTNL();
68693 - if (local->open_count)
68694 + if (local_read(&local->open_count))
68695 return -EBUSY;
68696
68697 ref = rate_control_alloc(name, local);
68698 diff -urNp linux-2.6.32.42/net/mac80211/tx.c linux-2.6.32.42/net/mac80211/tx.c
68699 --- linux-2.6.32.42/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
68700 +++ linux-2.6.32.42/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
68701 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
68702 return cpu_to_le16(dur);
68703 }
68704
68705 -static int inline is_ieee80211_device(struct ieee80211_local *local,
68706 +static inline int is_ieee80211_device(struct ieee80211_local *local,
68707 struct net_device *dev)
68708 {
68709 return local == wdev_priv(dev->ieee80211_ptr);
68710 diff -urNp linux-2.6.32.42/net/mac80211/util.c linux-2.6.32.42/net/mac80211/util.c
68711 --- linux-2.6.32.42/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
68712 +++ linux-2.6.32.42/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
68713 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
68714 local->resuming = true;
68715
68716 /* restart hardware */
68717 - if (local->open_count) {
68718 + if (local_read(&local->open_count)) {
68719 /*
68720 * Upon resume hardware can sometimes be goofy due to
68721 * various platform / driver / bus issues, so restarting
68722 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c
68723 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
68724 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
68725 @@ -564,7 +564,7 @@ static const struct file_operations ip_v
68726 .open = ip_vs_app_open,
68727 .read = seq_read,
68728 .llseek = seq_lseek,
68729 - .release = seq_release,
68730 + .release = seq_release_net,
68731 };
68732 #endif
68733
68734 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c
68735 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
68736 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
68737 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
68738 /* if the connection is not template and is created
68739 * by sync, preserve the activity flag.
68740 */
68741 - cp->flags |= atomic_read(&dest->conn_flags) &
68742 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
68743 (~IP_VS_CONN_F_INACTIVE);
68744 else
68745 - cp->flags |= atomic_read(&dest->conn_flags);
68746 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
68747 cp->dest = dest;
68748
68749 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
68750 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
68751 atomic_set(&cp->refcnt, 1);
68752
68753 atomic_set(&cp->n_control, 0);
68754 - atomic_set(&cp->in_pkts, 0);
68755 + atomic_set_unchecked(&cp->in_pkts, 0);
68756
68757 atomic_inc(&ip_vs_conn_count);
68758 if (flags & IP_VS_CONN_F_NO_CPORT)
68759 @@ -871,7 +871,7 @@ static const struct file_operations ip_v
68760 .open = ip_vs_conn_open,
68761 .read = seq_read,
68762 .llseek = seq_lseek,
68763 - .release = seq_release,
68764 + .release = seq_release_net,
68765 };
68766
68767 static const char *ip_vs_origin_name(unsigned flags)
68768 @@ -934,7 +934,7 @@ static const struct file_operations ip_v
68769 .open = ip_vs_conn_sync_open,
68770 .read = seq_read,
68771 .llseek = seq_lseek,
68772 - .release = seq_release,
68773 + .release = seq_release_net,
68774 };
68775
68776 #endif
68777 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
68778
68779 /* Don't drop the entry if its number of incoming packets is not
68780 located in [0, 8] */
68781 - i = atomic_read(&cp->in_pkts);
68782 + i = atomic_read_unchecked(&cp->in_pkts);
68783 if (i > 8 || i < 0) return 0;
68784
68785 if (!todrop_rate[i]) return 0;
68786 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c
68787 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
68788 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
68789 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
68790 ret = cp->packet_xmit(skb, cp, pp);
68791 /* do not touch skb anymore */
68792
68793 - atomic_inc(&cp->in_pkts);
68794 + atomic_inc_unchecked(&cp->in_pkts);
68795 ip_vs_conn_put(cp);
68796 return ret;
68797 }
68798 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
68799 * Sync connection if it is about to close to
68800 * encorage the standby servers to update the connections timeout
68801 */
68802 - pkts = atomic_add_return(1, &cp->in_pkts);
68803 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
68804 if (af == AF_INET &&
68805 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
68806 (((cp->protocol != IPPROTO_TCP ||
68807 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c
68808 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
68809 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
68810 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
68811 ip_vs_rs_hash(dest);
68812 write_unlock_bh(&__ip_vs_rs_lock);
68813 }
68814 - atomic_set(&dest->conn_flags, conn_flags);
68815 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
68816
68817 /* bind the service */
68818 if (!dest->svc) {
68819 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
68820 " %-7s %-6d %-10d %-10d\n",
68821 &dest->addr.in6,
68822 ntohs(dest->port),
68823 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
68824 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
68825 atomic_read(&dest->weight),
68826 atomic_read(&dest->activeconns),
68827 atomic_read(&dest->inactconns));
68828 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
68829 "%-7s %-6d %-10d %-10d\n",
68830 ntohl(dest->addr.ip),
68831 ntohs(dest->port),
68832 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
68833 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
68834 atomic_read(&dest->weight),
68835 atomic_read(&dest->activeconns),
68836 atomic_read(&dest->inactconns));
68837 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
68838 .open = ip_vs_info_open,
68839 .read = seq_read,
68840 .llseek = seq_lseek,
68841 - .release = seq_release_private,
68842 + .release = seq_release_net,
68843 };
68844
68845 #endif
68846 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
68847 .open = ip_vs_stats_seq_open,
68848 .read = seq_read,
68849 .llseek = seq_lseek,
68850 - .release = single_release,
68851 + .release = single_release_net,
68852 };
68853
68854 #endif
68855 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
68856
68857 entry.addr = dest->addr.ip;
68858 entry.port = dest->port;
68859 - entry.conn_flags = atomic_read(&dest->conn_flags);
68860 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
68861 entry.weight = atomic_read(&dest->weight);
68862 entry.u_threshold = dest->u_threshold;
68863 entry.l_threshold = dest->l_threshold;
68864 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
68865 unsigned char arg[128];
68866 int ret = 0;
68867
68868 + pax_track_stack();
68869 +
68870 if (!capable(CAP_NET_ADMIN))
68871 return -EPERM;
68872
68873 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
68874 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
68875
68876 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
68877 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
68878 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
68879 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
68880 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
68881 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
68882 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c
68883 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
68884 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
68885 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const
68886
68887 if (opt)
68888 memcpy(&cp->in_seq, opt, sizeof(*opt));
68889 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
68890 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
68891 cp->state = state;
68892 cp->old_state = cp->state;
68893 /*
68894 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c
68895 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
68896 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
68897 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
68898 else
68899 rc = NF_ACCEPT;
68900 /* do not touch skb anymore */
68901 - atomic_inc(&cp->in_pkts);
68902 + atomic_inc_unchecked(&cp->in_pkts);
68903 goto out;
68904 }
68905
68906 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
68907 else
68908 rc = NF_ACCEPT;
68909 /* do not touch skb anymore */
68910 - atomic_inc(&cp->in_pkts);
68911 + atomic_inc_unchecked(&cp->in_pkts);
68912 goto out;
68913 }
68914
68915 diff -urNp linux-2.6.32.42/net/netfilter/Kconfig linux-2.6.32.42/net/netfilter/Kconfig
68916 --- linux-2.6.32.42/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
68917 +++ linux-2.6.32.42/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
68918 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
68919
68920 To compile it as a module, choose M here. If unsure, say N.
68921
68922 +config NETFILTER_XT_MATCH_GRADM
68923 + tristate '"gradm" match support'
68924 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
68925 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
68926 + ---help---
68927 + The gradm match allows to match on grsecurity RBAC being enabled.
68928 + It is useful when iptables rules are applied early on bootup to
68929 + prevent connections to the machine (except from a trusted host)
68930 + while the RBAC system is disabled.
68931 +
68932 config NETFILTER_XT_MATCH_HASHLIMIT
68933 tristate '"hashlimit" match support'
68934 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
68935 diff -urNp linux-2.6.32.42/net/netfilter/Makefile linux-2.6.32.42/net/netfilter/Makefile
68936 --- linux-2.6.32.42/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
68937 +++ linux-2.6.32.42/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
68938 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
68939 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
68940 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
68941 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
68942 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
68943 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
68944 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
68945 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
68946 diff -urNp linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c
68947 --- linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
68948 +++ linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
68949 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
68950 static int
68951 ctnetlink_parse_tuple(const struct nlattr * const cda[],
68952 struct nf_conntrack_tuple *tuple,
68953 - enum ctattr_tuple type, u_int8_t l3num)
68954 + enum ctattr_type type, u_int8_t l3num)
68955 {
68956 struct nlattr *tb[CTA_TUPLE_MAX+1];
68957 int err;
68958 diff -urNp linux-2.6.32.42/net/netfilter/nfnetlink_log.c linux-2.6.32.42/net/netfilter/nfnetlink_log.c
68959 --- linux-2.6.32.42/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
68960 +++ linux-2.6.32.42/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
68961 @@ -68,7 +68,7 @@ struct nfulnl_instance {
68962 };
68963
68964 static DEFINE_RWLOCK(instances_lock);
68965 -static atomic_t global_seq;
68966 +static atomic_unchecked_t global_seq;
68967
68968 #define INSTANCE_BUCKETS 16
68969 static struct hlist_head instance_table[INSTANCE_BUCKETS];
68970 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
68971 /* global sequence number */
68972 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
68973 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
68974 - htonl(atomic_inc_return(&global_seq)));
68975 + htonl(atomic_inc_return_unchecked(&global_seq)));
68976
68977 if (data_len) {
68978 struct nlattr *nla;
68979 diff -urNp linux-2.6.32.42/net/netfilter/xt_gradm.c linux-2.6.32.42/net/netfilter/xt_gradm.c
68980 --- linux-2.6.32.42/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
68981 +++ linux-2.6.32.42/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
68982 @@ -0,0 +1,51 @@
68983 +/*
68984 + * gradm match for netfilter
68985 + * Copyright © Zbigniew Krzystolik, 2010
68986 + *
68987 + * This program is free software; you can redistribute it and/or modify
68988 + * it under the terms of the GNU General Public License; either version
68989 + * 2 or 3 as published by the Free Software Foundation.
68990 + */
68991 +#include <linux/module.h>
68992 +#include <linux/moduleparam.h>
68993 +#include <linux/skbuff.h>
68994 +#include <linux/netfilter/x_tables.h>
68995 +#include <linux/grsecurity.h>
68996 +#include <linux/netfilter/xt_gradm.h>
68997 +
68998 +static bool
68999 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
69000 +{
69001 + const struct xt_gradm_mtinfo *info = par->matchinfo;
69002 + bool retval = false;
69003 + if (gr_acl_is_enabled())
69004 + retval = true;
69005 + return retval ^ info->invflags;
69006 +}
69007 +
69008 +static struct xt_match gradm_mt_reg __read_mostly = {
69009 + .name = "gradm",
69010 + .revision = 0,
69011 + .family = NFPROTO_UNSPEC,
69012 + .match = gradm_mt,
69013 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
69014 + .me = THIS_MODULE,
69015 +};
69016 +
69017 +static int __init gradm_mt_init(void)
69018 +{
69019 + return xt_register_match(&gradm_mt_reg);
69020 +}
69021 +
69022 +static void __exit gradm_mt_exit(void)
69023 +{
69024 + xt_unregister_match(&gradm_mt_reg);
69025 +}
69026 +
69027 +module_init(gradm_mt_init);
69028 +module_exit(gradm_mt_exit);
69029 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
69030 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
69031 +MODULE_LICENSE("GPL");
69032 +MODULE_ALIAS("ipt_gradm");
69033 +MODULE_ALIAS("ip6t_gradm");
69034 diff -urNp linux-2.6.32.42/net/netlink/af_netlink.c linux-2.6.32.42/net/netlink/af_netlink.c
69035 --- linux-2.6.32.42/net/netlink/af_netlink.c 2011-03-27 14:31:47.000000000 -0400
69036 +++ linux-2.6.32.42/net/netlink/af_netlink.c 2011-05-04 17:56:28.000000000 -0400
69037 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock
69038 sk->sk_error_report(sk);
69039 }
69040 }
69041 - atomic_inc(&sk->sk_drops);
69042 + atomic_inc_unchecked(&sk->sk_drops);
69043 }
69044
69045 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
69046 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_f
69047 struct netlink_sock *nlk = nlk_sk(s);
69048
69049 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
69050 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69051 + NULL,
69052 +#else
69053 s,
69054 +#endif
69055 s->sk_protocol,
69056 nlk->pid,
69057 nlk->groups ? (u32)nlk->groups[0] : 0,
69058 sk_rmem_alloc_get(s),
69059 sk_wmem_alloc_get(s),
69060 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69061 + NULL,
69062 +#else
69063 nlk->cb,
69064 +#endif
69065 atomic_read(&s->sk_refcnt),
69066 - atomic_read(&s->sk_drops)
69067 + atomic_read_unchecked(&s->sk_drops)
69068 );
69069
69070 }
69071 diff -urNp linux-2.6.32.42/net/netrom/af_netrom.c linux-2.6.32.42/net/netrom/af_netrom.c
69072 --- linux-2.6.32.42/net/netrom/af_netrom.c 2011-03-27 14:31:47.000000000 -0400
69073 +++ linux-2.6.32.42/net/netrom/af_netrom.c 2011-04-17 15:56:46.000000000 -0400
69074 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *soc
69075 struct sock *sk = sock->sk;
69076 struct nr_sock *nr = nr_sk(sk);
69077
69078 + memset(sax, 0, sizeof(*sax));
69079 lock_sock(sk);
69080 if (peer != 0) {
69081 if (sk->sk_state != TCP_ESTABLISHED) {
69082 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *soc
69083 *uaddr_len = sizeof(struct full_sockaddr_ax25);
69084 } else {
69085 sax->fsa_ax25.sax25_family = AF_NETROM;
69086 - sax->fsa_ax25.sax25_ndigis = 0;
69087 sax->fsa_ax25.sax25_call = nr->source_addr;
69088 *uaddr_len = sizeof(struct sockaddr_ax25);
69089 }
69090 diff -urNp linux-2.6.32.42/net/packet/af_packet.c linux-2.6.32.42/net/packet/af_packet.c
69091 --- linux-2.6.32.42/net/packet/af_packet.c 2011-04-17 17:00:52.000000000 -0400
69092 +++ linux-2.6.32.42/net/packet/af_packet.c 2011-04-17 15:56:46.000000000 -0400
69093 @@ -2427,7 +2427,11 @@ static int packet_seq_show(struct seq_fi
69094
69095 seq_printf(seq,
69096 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
69097 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69098 + NULL,
69099 +#else
69100 s,
69101 +#endif
69102 atomic_read(&s->sk_refcnt),
69103 s->sk_type,
69104 ntohs(po->num),
69105 diff -urNp linux-2.6.32.42/net/phonet/af_phonet.c linux-2.6.32.42/net/phonet/af_phonet.c
69106 --- linux-2.6.32.42/net/phonet/af_phonet.c 2011-03-27 14:31:47.000000000 -0400
69107 +++ linux-2.6.32.42/net/phonet/af_phonet.c 2011-04-17 15:56:46.000000000 -0400
69108 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
69109 {
69110 struct phonet_protocol *pp;
69111
69112 - if (protocol >= PHONET_NPROTO)
69113 + if (protocol < 0 || protocol >= PHONET_NPROTO)
69114 return NULL;
69115
69116 spin_lock(&proto_tab_lock);
69117 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_regist
69118 {
69119 int err = 0;
69120
69121 - if (protocol >= PHONET_NPROTO)
69122 + if (protocol < 0 || protocol >= PHONET_NPROTO)
69123 return -EINVAL;
69124
69125 err = proto_register(pp->prot, 1);
69126 diff -urNp linux-2.6.32.42/net/phonet/datagram.c linux-2.6.32.42/net/phonet/datagram.c
69127 --- linux-2.6.32.42/net/phonet/datagram.c 2011-03-27 14:31:47.000000000 -0400
69128 +++ linux-2.6.32.42/net/phonet/datagram.c 2011-05-04 17:56:28.000000000 -0400
69129 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *s
69130 if (err < 0) {
69131 kfree_skb(skb);
69132 if (err == -ENOMEM)
69133 - atomic_inc(&sk->sk_drops);
69134 + atomic_inc_unchecked(&sk->sk_drops);
69135 }
69136 return err ? NET_RX_DROP : NET_RX_SUCCESS;
69137 }
69138 diff -urNp linux-2.6.32.42/net/phonet/pep.c linux-2.6.32.42/net/phonet/pep.c
69139 --- linux-2.6.32.42/net/phonet/pep.c 2011-03-27 14:31:47.000000000 -0400
69140 +++ linux-2.6.32.42/net/phonet/pep.c 2011-05-04 17:56:28.000000000 -0400
69141 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk,
69142
69143 case PNS_PEP_CTRL_REQ:
69144 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
69145 - atomic_inc(&sk->sk_drops);
69146 + atomic_inc_unchecked(&sk->sk_drops);
69147 break;
69148 }
69149 __skb_pull(skb, 4);
69150 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk,
69151 if (!err)
69152 return 0;
69153 if (err == -ENOMEM)
69154 - atomic_inc(&sk->sk_drops);
69155 + atomic_inc_unchecked(&sk->sk_drops);
69156 break;
69157 }
69158
69159 if (pn->rx_credits == 0) {
69160 - atomic_inc(&sk->sk_drops);
69161 + atomic_inc_unchecked(&sk->sk_drops);
69162 err = -ENOBUFS;
69163 break;
69164 }
69165 diff -urNp linux-2.6.32.42/net/phonet/socket.c linux-2.6.32.42/net/phonet/socket.c
69166 --- linux-2.6.32.42/net/phonet/socket.c 2011-03-27 14:31:47.000000000 -0400
69167 +++ linux-2.6.32.42/net/phonet/socket.c 2011-05-04 17:57:07.000000000 -0400
69168 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_f
69169 sk->sk_state,
69170 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
69171 sock_i_uid(sk), sock_i_ino(sk),
69172 - atomic_read(&sk->sk_refcnt), sk,
69173 - atomic_read(&sk->sk_drops), &len);
69174 + atomic_read(&sk->sk_refcnt),
69175 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69176 + NULL,
69177 +#else
69178 + sk,
69179 +#endif
69180 + atomic_read_unchecked(&sk->sk_drops), &len);
69181 }
69182 seq_printf(seq, "%*s\n", 127 - len, "");
69183 return 0;
69184 diff -urNp linux-2.6.32.42/net/rds/cong.c linux-2.6.32.42/net/rds/cong.c
69185 --- linux-2.6.32.42/net/rds/cong.c 2011-03-27 14:31:47.000000000 -0400
69186 +++ linux-2.6.32.42/net/rds/cong.c 2011-05-04 17:56:28.000000000 -0400
69187 @@ -77,7 +77,7 @@
69188 * finds that the saved generation number is smaller than the global generation
69189 * number, it wakes up the process.
69190 */
69191 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
69192 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
69193
69194 /*
69195 * Congestion monitoring
69196 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
69197 rdsdebug("waking map %p for %pI4\n",
69198 map, &map->m_addr);
69199 rds_stats_inc(s_cong_update_received);
69200 - atomic_inc(&rds_cong_generation);
69201 + atomic_inc_unchecked(&rds_cong_generation);
69202 if (waitqueue_active(&map->m_waitq))
69203 wake_up(&map->m_waitq);
69204 if (waitqueue_active(&rds_poll_waitq))
69205 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
69206
69207 int rds_cong_updated_since(unsigned long *recent)
69208 {
69209 - unsigned long gen = atomic_read(&rds_cong_generation);
69210 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
69211
69212 if (likely(*recent == gen))
69213 return 0;
69214 diff -urNp linux-2.6.32.42/net/rds/iw_rdma.c linux-2.6.32.42/net/rds/iw_rdma.c
69215 --- linux-2.6.32.42/net/rds/iw_rdma.c 2011-03-27 14:31:47.000000000 -0400
69216 +++ linux-2.6.32.42/net/rds/iw_rdma.c 2011-05-16 21:46:57.000000000 -0400
69217 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
69218 struct rdma_cm_id *pcm_id;
69219 int rc;
69220
69221 + pax_track_stack();
69222 +
69223 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
69224 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
69225
69226 diff -urNp linux-2.6.32.42/net/rds/Kconfig linux-2.6.32.42/net/rds/Kconfig
69227 --- linux-2.6.32.42/net/rds/Kconfig 2011-03-27 14:31:47.000000000 -0400
69228 +++ linux-2.6.32.42/net/rds/Kconfig 2011-04-17 15:56:46.000000000 -0400
69229 @@ -1,7 +1,7 @@
69230
69231 config RDS
69232 tristate "The RDS Protocol (EXPERIMENTAL)"
69233 - depends on INET && EXPERIMENTAL
69234 + depends on INET && EXPERIMENTAL && BROKEN
69235 ---help---
69236 The RDS (Reliable Datagram Sockets) protocol provides reliable,
69237 sequenced delivery of datagrams over Infiniband, iWARP,
69238 diff -urNp linux-2.6.32.42/net/rxrpc/af_rxrpc.c linux-2.6.32.42/net/rxrpc/af_rxrpc.c
69239 --- linux-2.6.32.42/net/rxrpc/af_rxrpc.c 2011-03-27 14:31:47.000000000 -0400
69240 +++ linux-2.6.32.42/net/rxrpc/af_rxrpc.c 2011-05-04 17:56:28.000000000 -0400
69241 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_
69242 __be32 rxrpc_epoch;
69243
69244 /* current debugging ID */
69245 -atomic_t rxrpc_debug_id;
69246 +atomic_unchecked_t rxrpc_debug_id;
69247
69248 /* count of skbs currently in use */
69249 atomic_t rxrpc_n_skbs;
69250 diff -urNp linux-2.6.32.42/net/rxrpc/ar-ack.c linux-2.6.32.42/net/rxrpc/ar-ack.c
69251 --- linux-2.6.32.42/net/rxrpc/ar-ack.c 2011-03-27 14:31:47.000000000 -0400
69252 +++ linux-2.6.32.42/net/rxrpc/ar-ack.c 2011-05-16 21:46:57.000000000 -0400
69253 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_ca
69254
69255 _enter("{%d,%d,%d,%d},",
69256 call->acks_hard, call->acks_unacked,
69257 - atomic_read(&call->sequence),
69258 + atomic_read_unchecked(&call->sequence),
69259 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
69260
69261 stop = 0;
69262 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_ca
69263
69264 /* each Tx packet has a new serial number */
69265 sp->hdr.serial =
69266 - htonl(atomic_inc_return(&call->conn->serial));
69267 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
69268
69269 hdr = (struct rxrpc_header *) txb->head;
69270 hdr->serial = sp->hdr.serial;
69271 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struc
69272 */
69273 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
69274 {
69275 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
69276 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
69277 }
69278
69279 /*
69280 @@ -627,7 +627,7 @@ process_further:
69281
69282 latest = ntohl(sp->hdr.serial);
69283 hard = ntohl(ack.firstPacket);
69284 - tx = atomic_read(&call->sequence);
69285 + tx = atomic_read_unchecked(&call->sequence);
69286
69287 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
69288 latest,
69289 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_stru
69290 u32 abort_code = RX_PROTOCOL_ERROR;
69291 u8 *acks = NULL;
69292
69293 + pax_track_stack();
69294 +
69295 //printk("\n--------------------\n");
69296 _enter("{%d,%s,%lx} [%lu]",
69297 call->debug_id, rxrpc_call_states[call->state], call->events,
69298 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_stru
69299 goto maybe_reschedule;
69300
69301 send_ACK_with_skew:
69302 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
69303 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
69304 ntohl(ack.serial));
69305 send_ACK:
69306 mtu = call->conn->trans->peer->if_mtu;
69307 @@ -1171,7 +1173,7 @@ send_ACK:
69308 ackinfo.rxMTU = htonl(5692);
69309 ackinfo.jumbo_max = htonl(4);
69310
69311 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
69312 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
69313 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
69314 ntohl(hdr.serial),
69315 ntohs(ack.maxSkew),
69316 @@ -1189,7 +1191,7 @@ send_ACK:
69317 send_message:
69318 _debug("send message");
69319
69320 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
69321 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
69322 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
69323 send_message_2:
69324
69325 diff -urNp linux-2.6.32.42/net/rxrpc/ar-call.c linux-2.6.32.42/net/rxrpc/ar-call.c
69326 --- linux-2.6.32.42/net/rxrpc/ar-call.c 2011-03-27 14:31:47.000000000 -0400
69327 +++ linux-2.6.32.42/net/rxrpc/ar-call.c 2011-05-04 17:56:28.000000000 -0400
69328 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
69329 spin_lock_init(&call->lock);
69330 rwlock_init(&call->state_lock);
69331 atomic_set(&call->usage, 1);
69332 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
69333 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69334 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
69335
69336 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
69337 diff -urNp linux-2.6.32.42/net/rxrpc/ar-connection.c linux-2.6.32.42/net/rxrpc/ar-connection.c
69338 --- linux-2.6.32.42/net/rxrpc/ar-connection.c 2011-03-27 14:31:47.000000000 -0400
69339 +++ linux-2.6.32.42/net/rxrpc/ar-connection.c 2011-05-04 17:56:28.000000000 -0400
69340 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_al
69341 rwlock_init(&conn->lock);
69342 spin_lock_init(&conn->state_lock);
69343 atomic_set(&conn->usage, 1);
69344 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
69345 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69346 conn->avail_calls = RXRPC_MAXCALLS;
69347 conn->size_align = 4;
69348 conn->header_size = sizeof(struct rxrpc_header);
69349 diff -urNp linux-2.6.32.42/net/rxrpc/ar-connevent.c linux-2.6.32.42/net/rxrpc/ar-connevent.c
69350 --- linux-2.6.32.42/net/rxrpc/ar-connevent.c 2011-03-27 14:31:47.000000000 -0400
69351 +++ linux-2.6.32.42/net/rxrpc/ar-connevent.c 2011-05-04 17:56:28.000000000 -0400
69352 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
69353
69354 len = iov[0].iov_len + iov[1].iov_len;
69355
69356 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
69357 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69358 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
69359
69360 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
69361 diff -urNp linux-2.6.32.42/net/rxrpc/ar-input.c linux-2.6.32.42/net/rxrpc/ar-input.c
69362 --- linux-2.6.32.42/net/rxrpc/ar-input.c 2011-03-27 14:31:47.000000000 -0400
69363 +++ linux-2.6.32.42/net/rxrpc/ar-input.c 2011-05-04 17:56:28.000000000 -0400
69364 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rx
69365 /* track the latest serial number on this connection for ACK packet
69366 * information */
69367 serial = ntohl(sp->hdr.serial);
69368 - hi_serial = atomic_read(&call->conn->hi_serial);
69369 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
69370 while (serial > hi_serial)
69371 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
69372 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
69373 serial);
69374
69375 /* request ACK generation for any ACK or DATA packet that requests
69376 diff -urNp linux-2.6.32.42/net/rxrpc/ar-internal.h linux-2.6.32.42/net/rxrpc/ar-internal.h
69377 --- linux-2.6.32.42/net/rxrpc/ar-internal.h 2011-03-27 14:31:47.000000000 -0400
69378 +++ linux-2.6.32.42/net/rxrpc/ar-internal.h 2011-05-04 17:56:28.000000000 -0400
69379 @@ -272,8 +272,8 @@ struct rxrpc_connection {
69380 int error; /* error code for local abort */
69381 int debug_id; /* debug ID for printks */
69382 unsigned call_counter; /* call ID counter */
69383 - atomic_t serial; /* packet serial number counter */
69384 - atomic_t hi_serial; /* highest serial number received */
69385 + atomic_unchecked_t serial; /* packet serial number counter */
69386 + atomic_unchecked_t hi_serial; /* highest serial number received */
69387 u8 avail_calls; /* number of calls available */
69388 u8 size_align; /* data size alignment (for security) */
69389 u8 header_size; /* rxrpc + security header size */
69390 @@ -346,7 +346,7 @@ struct rxrpc_call {
69391 spinlock_t lock;
69392 rwlock_t state_lock; /* lock for state transition */
69393 atomic_t usage;
69394 - atomic_t sequence; /* Tx data packet sequence counter */
69395 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
69396 u32 abort_code; /* local/remote abort code */
69397 enum { /* current state of call */
69398 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
69399 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
69400 */
69401 extern atomic_t rxrpc_n_skbs;
69402 extern __be32 rxrpc_epoch;
69403 -extern atomic_t rxrpc_debug_id;
69404 +extern atomic_unchecked_t rxrpc_debug_id;
69405 extern struct workqueue_struct *rxrpc_workqueue;
69406
69407 /*
69408 diff -urNp linux-2.6.32.42/net/rxrpc/ar-key.c linux-2.6.32.42/net/rxrpc/ar-key.c
69409 --- linux-2.6.32.42/net/rxrpc/ar-key.c 2011-03-27 14:31:47.000000000 -0400
69410 +++ linux-2.6.32.42/net/rxrpc/ar-key.c 2011-04-17 15:56:46.000000000 -0400
69411 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(s
69412 return ret;
69413
69414 plen -= sizeof(*token);
69415 - token = kmalloc(sizeof(*token), GFP_KERNEL);
69416 + token = kzalloc(sizeof(*token), GFP_KERNEL);
69417 if (!token)
69418 return -ENOMEM;
69419
69420 - token->kad = kmalloc(plen, GFP_KERNEL);
69421 + token->kad = kzalloc(plen, GFP_KERNEL);
69422 if (!token->kad) {
69423 kfree(token);
69424 return -ENOMEM;
69425 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key
69426 goto error;
69427
69428 ret = -ENOMEM;
69429 - token = kmalloc(sizeof(*token), GFP_KERNEL);
69430 + token = kzalloc(sizeof(*token), GFP_KERNEL);
69431 if (!token)
69432 goto error;
69433 - token->kad = kmalloc(plen, GFP_KERNEL);
69434 + token->kad = kzalloc(plen, GFP_KERNEL);
69435 if (!token->kad)
69436 goto error_free;
69437
69438 diff -urNp linux-2.6.32.42/net/rxrpc/ar-local.c linux-2.6.32.42/net/rxrpc/ar-local.c
69439 --- linux-2.6.32.42/net/rxrpc/ar-local.c 2011-03-27 14:31:47.000000000 -0400
69440 +++ linux-2.6.32.42/net/rxrpc/ar-local.c 2011-05-04 17:56:28.000000000 -0400
69441 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
69442 spin_lock_init(&local->lock);
69443 rwlock_init(&local->services_lock);
69444 atomic_set(&local->usage, 1);
69445 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
69446 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69447 memcpy(&local->srx, srx, sizeof(*srx));
69448 }
69449
69450 diff -urNp linux-2.6.32.42/net/rxrpc/ar-output.c linux-2.6.32.42/net/rxrpc/ar-output.c
69451 --- linux-2.6.32.42/net/rxrpc/ar-output.c 2011-03-27 14:31:47.000000000 -0400
69452 +++ linux-2.6.32.42/net/rxrpc/ar-output.c 2011-05-04 17:56:28.000000000 -0400
69453 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb
69454 sp->hdr.cid = call->cid;
69455 sp->hdr.callNumber = call->call_id;
69456 sp->hdr.seq =
69457 - htonl(atomic_inc_return(&call->sequence));
69458 + htonl(atomic_inc_return_unchecked(&call->sequence));
69459 sp->hdr.serial =
69460 - htonl(atomic_inc_return(&conn->serial));
69461 + htonl(atomic_inc_return_unchecked(&conn->serial));
69462 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
69463 sp->hdr.userStatus = 0;
69464 sp->hdr.securityIndex = conn->security_ix;
69465 diff -urNp linux-2.6.32.42/net/rxrpc/ar-peer.c linux-2.6.32.42/net/rxrpc/ar-peer.c
69466 --- linux-2.6.32.42/net/rxrpc/ar-peer.c 2011-03-27 14:31:47.000000000 -0400
69467 +++ linux-2.6.32.42/net/rxrpc/ar-peer.c 2011-05-04 17:56:28.000000000 -0400
69468 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
69469 INIT_LIST_HEAD(&peer->error_targets);
69470 spin_lock_init(&peer->lock);
69471 atomic_set(&peer->usage, 1);
69472 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
69473 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69474 memcpy(&peer->srx, srx, sizeof(*srx));
69475
69476 rxrpc_assess_MTU_size(peer);
69477 diff -urNp linux-2.6.32.42/net/rxrpc/ar-proc.c linux-2.6.32.42/net/rxrpc/ar-proc.c
69478 --- linux-2.6.32.42/net/rxrpc/ar-proc.c 2011-03-27 14:31:47.000000000 -0400
69479 +++ linux-2.6.32.42/net/rxrpc/ar-proc.c 2011-05-04 17:56:28.000000000 -0400
69480 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
69481 atomic_read(&conn->usage),
69482 rxrpc_conn_states[conn->state],
69483 key_serial(conn->key),
69484 - atomic_read(&conn->serial),
69485 - atomic_read(&conn->hi_serial));
69486 + atomic_read_unchecked(&conn->serial),
69487 + atomic_read_unchecked(&conn->hi_serial));
69488
69489 return 0;
69490 }
69491 diff -urNp linux-2.6.32.42/net/rxrpc/ar-transport.c linux-2.6.32.42/net/rxrpc/ar-transport.c
69492 --- linux-2.6.32.42/net/rxrpc/ar-transport.c 2011-03-27 14:31:47.000000000 -0400
69493 +++ linux-2.6.32.42/net/rxrpc/ar-transport.c 2011-05-04 17:56:28.000000000 -0400
69494 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_all
69495 spin_lock_init(&trans->client_lock);
69496 rwlock_init(&trans->conn_lock);
69497 atomic_set(&trans->usage, 1);
69498 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
69499 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69500
69501 if (peer->srx.transport.family == AF_INET) {
69502 switch (peer->srx.transport_type) {
69503 diff -urNp linux-2.6.32.42/net/rxrpc/rxkad.c linux-2.6.32.42/net/rxrpc/rxkad.c
69504 --- linux-2.6.32.42/net/rxrpc/rxkad.c 2011-03-27 14:31:47.000000000 -0400
69505 +++ linux-2.6.32.42/net/rxrpc/rxkad.c 2011-05-16 21:46:57.000000000 -0400
69506 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(c
69507 u16 check;
69508 int nsg;
69509
69510 + pax_track_stack();
69511 +
69512 sp = rxrpc_skb(skb);
69513
69514 _enter("");
69515 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(cons
69516 u16 check;
69517 int nsg;
69518
69519 + pax_track_stack();
69520 +
69521 _enter("");
69522
69523 sp = rxrpc_skb(skb);
69524 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct
69525
69526 len = iov[0].iov_len + iov[1].iov_len;
69527
69528 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
69529 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69530 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
69531
69532 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
69533 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rx
69534
69535 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
69536
69537 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
69538 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69539 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
69540
69541 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
69542 diff -urNp linux-2.6.32.42/net/sctp/proc.c linux-2.6.32.42/net/sctp/proc.c
69543 --- linux-2.6.32.42/net/sctp/proc.c 2011-03-27 14:31:47.000000000 -0400
69544 +++ linux-2.6.32.42/net/sctp/proc.c 2011-04-17 15:56:46.000000000 -0400
69545 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_
69546 sctp_for_each_hentry(epb, node, &head->chain) {
69547 ep = sctp_ep(epb);
69548 sk = epb->sk;
69549 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
69550 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
69551 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69552 + NULL, NULL,
69553 +#else
69554 + ep, sk,
69555 +#endif
69556 sctp_sk(sk)->type, sk->sk_state, hash,
69557 epb->bind_addr.port,
69558 sock_i_uid(sk), sock_i_ino(sk));
69559 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct s
69560 seq_printf(seq,
69561 "%8p %8p %-3d %-3d %-2d %-4d "
69562 "%4d %8d %8d %7d %5lu %-5d %5d ",
69563 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
69564 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69565 + NULL, NULL,
69566 +#else
69567 + assoc, sk,
69568 +#endif
69569 + sctp_sk(sk)->type, sk->sk_state,
69570 assoc->state, hash,
69571 assoc->assoc_id,
69572 assoc->sndbuf_used,
69573 diff -urNp linux-2.6.32.42/net/sctp/socket.c linux-2.6.32.42/net/sctp/socket.c
69574 --- linux-2.6.32.42/net/sctp/socket.c 2011-03-27 14:31:47.000000000 -0400
69575 +++ linux-2.6.32.42/net/sctp/socket.c 2011-04-23 12:56:11.000000000 -0400
69576 @@ -5802,7 +5802,6 @@ pp_found:
69577 */
69578 int reuse = sk->sk_reuse;
69579 struct sock *sk2;
69580 - struct hlist_node *node;
69581
69582 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
69583 if (pp->fastreuse && sk->sk_reuse &&
69584 diff -urNp linux-2.6.32.42/net/socket.c linux-2.6.32.42/net/socket.c
69585 --- linux-2.6.32.42/net/socket.c 2011-03-27 14:31:47.000000000 -0400
69586 +++ linux-2.6.32.42/net/socket.c 2011-05-16 21:46:57.000000000 -0400
69587 @@ -87,6 +87,7 @@
69588 #include <linux/wireless.h>
69589 #include <linux/nsproxy.h>
69590 #include <linux/magic.h>
69591 +#include <linux/in.h>
69592
69593 #include <asm/uaccess.h>
69594 #include <asm/unistd.h>
69595 @@ -97,6 +98,21 @@
69596 #include <net/sock.h>
69597 #include <linux/netfilter.h>
69598
69599 +extern void gr_attach_curr_ip(const struct sock *sk);
69600 +extern int gr_handle_sock_all(const int family, const int type,
69601 + const int protocol);
69602 +extern int gr_handle_sock_server(const struct sockaddr *sck);
69603 +extern int gr_handle_sock_server_other(const struct sock *sck);
69604 +extern int gr_handle_sock_client(const struct sockaddr *sck);
69605 +extern int gr_search_connect(struct socket * sock,
69606 + struct sockaddr_in * addr);
69607 +extern int gr_search_bind(struct socket * sock,
69608 + struct sockaddr_in * addr);
69609 +extern int gr_search_listen(struct socket * sock);
69610 +extern int gr_search_accept(struct socket * sock);
69611 +extern int gr_search_socket(const int domain, const int type,
69612 + const int protocol);
69613 +
69614 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
69615 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
69616 unsigned long nr_segs, loff_t pos);
69617 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_sys
69618 mnt);
69619 }
69620
69621 -static struct vfsmount *sock_mnt __read_mostly;
69622 +struct vfsmount *sock_mnt __read_mostly;
69623
69624 static struct file_system_type sock_fs_type = {
69625 .name = "sockfs",
69626 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net
69627 return -EAFNOSUPPORT;
69628 if (type < 0 || type >= SOCK_MAX)
69629 return -EINVAL;
69630 + if (protocol < 0)
69631 + return -EINVAL;
69632
69633 /* Compatibility.
69634
69635 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int
69636 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
69637 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
69638
69639 + if(!gr_search_socket(family, type, protocol)) {
69640 + retval = -EACCES;
69641 + goto out;
69642 + }
69643 +
69644 + if (gr_handle_sock_all(family, type, protocol)) {
69645 + retval = -EACCES;
69646 + goto out;
69647 + }
69648 +
69649 retval = sock_create(family, type, protocol, &sock);
69650 if (retval < 0)
69651 goto out;
69652 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
69653 if (sock) {
69654 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
69655 if (err >= 0) {
69656 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
69657 + err = -EACCES;
69658 + goto error;
69659 + }
69660 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
69661 + if (err)
69662 + goto error;
69663 +
69664 err = security_socket_bind(sock,
69665 (struct sockaddr *)&address,
69666 addrlen);
69667 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
69668 (struct sockaddr *)
69669 &address, addrlen);
69670 }
69671 +error:
69672 fput_light(sock->file, fput_needed);
69673 }
69674 return err;
69675 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
69676 if ((unsigned)backlog > somaxconn)
69677 backlog = somaxconn;
69678
69679 + if (gr_handle_sock_server_other(sock->sk)) {
69680 + err = -EPERM;
69681 + goto error;
69682 + }
69683 +
69684 + err = gr_search_listen(sock);
69685 + if (err)
69686 + goto error;
69687 +
69688 err = security_socket_listen(sock, backlog);
69689 if (!err)
69690 err = sock->ops->listen(sock, backlog);
69691
69692 +error:
69693 fput_light(sock->file, fput_needed);
69694 }
69695 return err;
69696 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
69697 newsock->type = sock->type;
69698 newsock->ops = sock->ops;
69699
69700 + if (gr_handle_sock_server_other(sock->sk)) {
69701 + err = -EPERM;
69702 + sock_release(newsock);
69703 + goto out_put;
69704 + }
69705 +
69706 + err = gr_search_accept(sock);
69707 + if (err) {
69708 + sock_release(newsock);
69709 + goto out_put;
69710 + }
69711 +
69712 /*
69713 * We don't need try_module_get here, as the listening socket (sock)
69714 * has the protocol module (sock->ops->owner) held.
69715 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
69716 fd_install(newfd, newfile);
69717 err = newfd;
69718
69719 + gr_attach_curr_ip(newsock->sk);
69720 +
69721 out_put:
69722 fput_light(sock->file, fput_needed);
69723 out:
69724 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
69725 int, addrlen)
69726 {
69727 struct socket *sock;
69728 + struct sockaddr *sck;
69729 struct sockaddr_storage address;
69730 int err, fput_needed;
69731
69732 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
69733 if (err < 0)
69734 goto out_put;
69735
69736 + sck = (struct sockaddr *)&address;
69737 +
69738 + if (gr_handle_sock_client(sck)) {
69739 + err = -EACCES;
69740 + goto out_put;
69741 + }
69742 +
69743 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
69744 + if (err)
69745 + goto out_put;
69746 +
69747 err =
69748 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
69749 if (err)
69750 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
69751 int err, ctl_len, iov_size, total_len;
69752 int fput_needed;
69753
69754 + pax_track_stack();
69755 +
69756 err = -EFAULT;
69757 if (MSG_CMSG_COMPAT & flags) {
69758 if (get_compat_msghdr(&msg_sys, msg_compat))
69759 diff -urNp linux-2.6.32.42/net/sunrpc/sched.c linux-2.6.32.42/net/sunrpc/sched.c
69760 --- linux-2.6.32.42/net/sunrpc/sched.c 2011-03-27 14:31:47.000000000 -0400
69761 +++ linux-2.6.32.42/net/sunrpc/sched.c 2011-04-17 15:56:46.000000000 -0400
69762 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *w
69763 #ifdef RPC_DEBUG
69764 static void rpc_task_set_debuginfo(struct rpc_task *task)
69765 {
69766 - static atomic_t rpc_pid;
69767 + static atomic_unchecked_t rpc_pid;
69768
69769 task->tk_magic = RPC_TASK_MAGIC_ID;
69770 - task->tk_pid = atomic_inc_return(&rpc_pid);
69771 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
69772 }
69773 #else
69774 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
69775 diff -urNp linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma.c
69776 --- linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma.c 2011-03-27 14:31:47.000000000 -0400
69777 +++ linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-04 17:56:20.000000000 -0400
69778 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCR
69779 static unsigned int min_max_inline = 4096;
69780 static unsigned int max_max_inline = 65536;
69781
69782 -atomic_t rdma_stat_recv;
69783 -atomic_t rdma_stat_read;
69784 -atomic_t rdma_stat_write;
69785 -atomic_t rdma_stat_sq_starve;
69786 -atomic_t rdma_stat_rq_starve;
69787 -atomic_t rdma_stat_rq_poll;
69788 -atomic_t rdma_stat_rq_prod;
69789 -atomic_t rdma_stat_sq_poll;
69790 -atomic_t rdma_stat_sq_prod;
69791 +atomic_unchecked_t rdma_stat_recv;
69792 +atomic_unchecked_t rdma_stat_read;
69793 +atomic_unchecked_t rdma_stat_write;
69794 +atomic_unchecked_t rdma_stat_sq_starve;
69795 +atomic_unchecked_t rdma_stat_rq_starve;
69796 +atomic_unchecked_t rdma_stat_rq_poll;
69797 +atomic_unchecked_t rdma_stat_rq_prod;
69798 +atomic_unchecked_t rdma_stat_sq_poll;
69799 +atomic_unchecked_t rdma_stat_sq_prod;
69800
69801 /* Temporary NFS request map and context caches */
69802 struct kmem_cache *svc_rdma_map_cachep;
69803 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *ta
69804 len -= *ppos;
69805 if (len > *lenp)
69806 len = *lenp;
69807 - if (len && copy_to_user(buffer, str_buf, len))
69808 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
69809 return -EFAULT;
69810 *lenp = len;
69811 *ppos += len;
69812 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] =
69813 {
69814 .procname = "rdma_stat_read",
69815 .data = &rdma_stat_read,
69816 - .maxlen = sizeof(atomic_t),
69817 + .maxlen = sizeof(atomic_unchecked_t),
69818 .mode = 0644,
69819 .proc_handler = &read_reset_stat,
69820 },
69821 {
69822 .procname = "rdma_stat_recv",
69823 .data = &rdma_stat_recv,
69824 - .maxlen = sizeof(atomic_t),
69825 + .maxlen = sizeof(atomic_unchecked_t),
69826 .mode = 0644,
69827 .proc_handler = &read_reset_stat,
69828 },
69829 {
69830 .procname = "rdma_stat_write",
69831 .data = &rdma_stat_write,
69832 - .maxlen = sizeof(atomic_t),
69833 + .maxlen = sizeof(atomic_unchecked_t),
69834 .mode = 0644,
69835 .proc_handler = &read_reset_stat,
69836 },
69837 {
69838 .procname = "rdma_stat_sq_starve",
69839 .data = &rdma_stat_sq_starve,
69840 - .maxlen = sizeof(atomic_t),
69841 + .maxlen = sizeof(atomic_unchecked_t),
69842 .mode = 0644,
69843 .proc_handler = &read_reset_stat,
69844 },
69845 {
69846 .procname = "rdma_stat_rq_starve",
69847 .data = &rdma_stat_rq_starve,
69848 - .maxlen = sizeof(atomic_t),
69849 + .maxlen = sizeof(atomic_unchecked_t),
69850 .mode = 0644,
69851 .proc_handler = &read_reset_stat,
69852 },
69853 {
69854 .procname = "rdma_stat_rq_poll",
69855 .data = &rdma_stat_rq_poll,
69856 - .maxlen = sizeof(atomic_t),
69857 + .maxlen = sizeof(atomic_unchecked_t),
69858 .mode = 0644,
69859 .proc_handler = &read_reset_stat,
69860 },
69861 {
69862 .procname = "rdma_stat_rq_prod",
69863 .data = &rdma_stat_rq_prod,
69864 - .maxlen = sizeof(atomic_t),
69865 + .maxlen = sizeof(atomic_unchecked_t),
69866 .mode = 0644,
69867 .proc_handler = &read_reset_stat,
69868 },
69869 {
69870 .procname = "rdma_stat_sq_poll",
69871 .data = &rdma_stat_sq_poll,
69872 - .maxlen = sizeof(atomic_t),
69873 + .maxlen = sizeof(atomic_unchecked_t),
69874 .mode = 0644,
69875 .proc_handler = &read_reset_stat,
69876 },
69877 {
69878 .procname = "rdma_stat_sq_prod",
69879 .data = &rdma_stat_sq_prod,
69880 - .maxlen = sizeof(atomic_t),
69881 + .maxlen = sizeof(atomic_unchecked_t),
69882 .mode = 0644,
69883 .proc_handler = &read_reset_stat,
69884 },
69885 diff -urNp linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
69886 --- linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-03-27 14:31:47.000000000 -0400
69887 +++ linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-04 17:56:28.000000000 -0400
69888 @@ -495,7 +495,7 @@ next_sge:
69889 svc_rdma_put_context(ctxt, 0);
69890 goto out;
69891 }
69892 - atomic_inc(&rdma_stat_read);
69893 + atomic_inc_unchecked(&rdma_stat_read);
69894
69895 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
69896 chl_map->ch[ch_no].count -= read_wr.num_sge;
69897 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
69898 dto_q);
69899 list_del_init(&ctxt->dto_q);
69900 } else {
69901 - atomic_inc(&rdma_stat_rq_starve);
69902 + atomic_inc_unchecked(&rdma_stat_rq_starve);
69903 clear_bit(XPT_DATA, &xprt->xpt_flags);
69904 ctxt = NULL;
69905 }
69906 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
69907 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
69908 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
69909 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
69910 - atomic_inc(&rdma_stat_recv);
69911 + atomic_inc_unchecked(&rdma_stat_recv);
69912
69913 /* Build up the XDR from the receive buffers. */
69914 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
69915 diff -urNp linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_sendto.c
69916 --- linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-03-27 14:31:47.000000000 -0400
69917 +++ linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-04 17:56:28.000000000 -0400
69918 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdm
69919 write_wr.wr.rdma.remote_addr = to;
69920
69921 /* Post It */
69922 - atomic_inc(&rdma_stat_write);
69923 + atomic_inc_unchecked(&rdma_stat_write);
69924 if (svc_rdma_send(xprt, &write_wr))
69925 goto err;
69926 return 0;
69927 diff -urNp linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_transport.c
69928 --- linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-03-27 14:31:47.000000000 -0400
69929 +++ linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-04 17:56:28.000000000 -0400
69930 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rd
69931 return;
69932
69933 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
69934 - atomic_inc(&rdma_stat_rq_poll);
69935 + atomic_inc_unchecked(&rdma_stat_rq_poll);
69936
69937 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
69938 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
69939 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rd
69940 }
69941
69942 if (ctxt)
69943 - atomic_inc(&rdma_stat_rq_prod);
69944 + atomic_inc_unchecked(&rdma_stat_rq_prod);
69945
69946 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
69947 /*
69948 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rd
69949 return;
69950
69951 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
69952 - atomic_inc(&rdma_stat_sq_poll);
69953 + atomic_inc_unchecked(&rdma_stat_sq_poll);
69954 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
69955 if (wc.status != IB_WC_SUCCESS)
69956 /* Close the transport */
69957 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rd
69958 }
69959
69960 if (ctxt)
69961 - atomic_inc(&rdma_stat_sq_prod);
69962 + atomic_inc_unchecked(&rdma_stat_sq_prod);
69963 }
69964
69965 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
69966 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
69967 spin_lock_bh(&xprt->sc_lock);
69968 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
69969 spin_unlock_bh(&xprt->sc_lock);
69970 - atomic_inc(&rdma_stat_sq_starve);
69971 + atomic_inc_unchecked(&rdma_stat_sq_starve);
69972
69973 /* See if we can opportunistically reap SQ WR to make room */
69974 sq_cq_reap(xprt);
69975 diff -urNp linux-2.6.32.42/net/sysctl_net.c linux-2.6.32.42/net/sysctl_net.c
69976 --- linux-2.6.32.42/net/sysctl_net.c 2011-03-27 14:31:47.000000000 -0400
69977 +++ linux-2.6.32.42/net/sysctl_net.c 2011-04-17 15:56:46.000000000 -0400
69978 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
69979 struct ctl_table *table)
69980 {
69981 /* Allow network administrator to have same access as root. */
69982 - if (capable(CAP_NET_ADMIN)) {
69983 + if (capable_nolog(CAP_NET_ADMIN)) {
69984 int mode = (table->mode >> 6) & 7;
69985 return (mode << 6) | (mode << 3) | mode;
69986 }
69987 diff -urNp linux-2.6.32.42/net/unix/af_unix.c linux-2.6.32.42/net/unix/af_unix.c
69988 --- linux-2.6.32.42/net/unix/af_unix.c 2011-05-10 22:12:02.000000000 -0400
69989 +++ linux-2.6.32.42/net/unix/af_unix.c 2011-05-10 22:12:34.000000000 -0400
69990 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(stru
69991 err = -ECONNREFUSED;
69992 if (!S_ISSOCK(inode->i_mode))
69993 goto put_fail;
69994 +
69995 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
69996 + err = -EACCES;
69997 + goto put_fail;
69998 + }
69999 +
70000 u = unix_find_socket_byinode(net, inode);
70001 if (!u)
70002 goto put_fail;
70003 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(stru
70004 if (u) {
70005 struct dentry *dentry;
70006 dentry = unix_sk(u)->dentry;
70007 +
70008 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
70009 + err = -EPERM;
70010 + sock_put(u);
70011 + goto fail;
70012 + }
70013 +
70014 if (dentry)
70015 touch_atime(unix_sk(u)->mnt, dentry);
70016 } else
70017 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock
70018 err = security_path_mknod(&nd.path, dentry, mode, 0);
70019 if (err)
70020 goto out_mknod_drop_write;
70021 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
70022 + err = -EACCES;
70023 + goto out_mknod_drop_write;
70024 + }
70025 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
70026 out_mknod_drop_write:
70027 mnt_drop_write(nd.path.mnt);
70028 if (err)
70029 goto out_mknod_dput;
70030 +
70031 + gr_handle_create(dentry, nd.path.mnt);
70032 +
70033 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
70034 dput(nd.path.dentry);
70035 nd.path.dentry = dentry;
70036 @@ -872,6 +892,10 @@ out_mknod_drop_write:
70037 goto out_unlock;
70038 }
70039
70040 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
70041 + sk->sk_peercred.pid = current->pid;
70042 +#endif
70043 +
70044 list = &unix_socket_table[addr->hash];
70045 } else {
70046 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
70047 @@ -2211,7 +2235,11 @@ static int unix_seq_show(struct seq_file
70048 unix_state_lock(s);
70049
70050 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
70051 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70052 + NULL,
70053 +#else
70054 s,
70055 +#endif
70056 atomic_read(&s->sk_refcnt),
70057 0,
70058 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
70059 diff -urNp linux-2.6.32.42/net/wireless/wext.c linux-2.6.32.42/net/wireless/wext.c
70060 --- linux-2.6.32.42/net/wireless/wext.c 2011-03-27 14:31:47.000000000 -0400
70061 +++ linux-2.6.32.42/net/wireless/wext.c 2011-04-17 15:56:46.000000000 -0400
70062 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struc
70063 */
70064
70065 /* Support for very large requests */
70066 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
70067 - (user_length > descr->max_tokens)) {
70068 + if (user_length > descr->max_tokens) {
70069 /* Allow userspace to GET more than max so
70070 * we can support any size GET requests.
70071 * There is still a limit : -ENOMEM.
70072 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struc
70073 }
70074 }
70075
70076 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
70077 - /*
70078 - * If this is a GET, but not NOMAX, it means that the extra
70079 - * data is not bounded by userspace, but by max_tokens. Thus
70080 - * set the length to max_tokens. This matches the extra data
70081 - * allocation.
70082 - * The driver should fill it with the number of tokens it
70083 - * provided, and it may check iwp->length rather than having
70084 - * knowledge of max_tokens. If the driver doesn't change the
70085 - * iwp->length, this ioctl just copies back max_token tokens
70086 - * filled with zeroes. Hopefully the driver isn't claiming
70087 - * them to be valid data.
70088 - */
70089 - iwp->length = descr->max_tokens;
70090 - }
70091 -
70092 err = handler(dev, info, (union iwreq_data *) iwp, extra);
70093
70094 iwp->length += essid_compat;
70095 diff -urNp linux-2.6.32.42/net/xfrm/xfrm_policy.c linux-2.6.32.42/net/xfrm/xfrm_policy.c
70096 --- linux-2.6.32.42/net/xfrm/xfrm_policy.c 2011-03-27 14:31:47.000000000 -0400
70097 +++ linux-2.6.32.42/net/xfrm/xfrm_policy.c 2011-05-04 17:56:20.000000000 -0400
70098 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct x
70099 hlist_add_head(&policy->bydst, chain);
70100 xfrm_pol_hold(policy);
70101 net->xfrm.policy_count[dir]++;
70102 - atomic_inc(&flow_cache_genid);
70103 + atomic_inc_unchecked(&flow_cache_genid);
70104 if (delpol)
70105 __xfrm_policy_unlink(delpol, dir);
70106 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
70107 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ct
70108 write_unlock_bh(&xfrm_policy_lock);
70109
70110 if (ret && delete) {
70111 - atomic_inc(&flow_cache_genid);
70112 + atomic_inc_unchecked(&flow_cache_genid);
70113 xfrm_policy_kill(ret);
70114 }
70115 return ret;
70116 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(str
70117 write_unlock_bh(&xfrm_policy_lock);
70118
70119 if (ret && delete) {
70120 - atomic_inc(&flow_cache_genid);
70121 + atomic_inc_unchecked(&flow_cache_genid);
70122 xfrm_policy_kill(ret);
70123 }
70124 return ret;
70125 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u
70126 }
70127
70128 }
70129 - atomic_inc(&flow_cache_genid);
70130 + atomic_inc_unchecked(&flow_cache_genid);
70131 out:
70132 write_unlock_bh(&xfrm_policy_lock);
70133 return err;
70134 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_polic
70135 write_unlock_bh(&xfrm_policy_lock);
70136 if (pol) {
70137 if (dir < XFRM_POLICY_MAX)
70138 - atomic_inc(&flow_cache_genid);
70139 + atomic_inc_unchecked(&flow_cache_genid);
70140 xfrm_policy_kill(pol);
70141 return 0;
70142 }
70143 @@ -1477,7 +1477,7 @@ free_dst:
70144 goto out;
70145 }
70146
70147 -static int inline
70148 +static inline int
70149 xfrm_dst_alloc_copy(void **target, void *src, int size)
70150 {
70151 if (!*target) {
70152 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void
70153 return 0;
70154 }
70155
70156 -static int inline
70157 +static inline int
70158 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
70159 {
70160 #ifdef CONFIG_XFRM_SUB_POLICY
70161 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry
70162 #endif
70163 }
70164
70165 -static int inline
70166 +static inline int
70167 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
70168 {
70169 #ifdef CONFIG_XFRM_SUB_POLICY
70170 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struc
70171 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
70172
70173 restart:
70174 - genid = atomic_read(&flow_cache_genid);
70175 + genid = atomic_read_unchecked(&flow_cache_genid);
70176 policy = NULL;
70177 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
70178 pols[pi] = NULL;
70179 @@ -1680,7 +1680,7 @@ restart:
70180 goto error;
70181 }
70182 if (nx == -EAGAIN ||
70183 - genid != atomic_read(&flow_cache_genid)) {
70184 + genid != atomic_read_unchecked(&flow_cache_genid)) {
70185 xfrm_pols_put(pols, npols);
70186 goto restart;
70187 }
70188 diff -urNp linux-2.6.32.42/net/xfrm/xfrm_user.c linux-2.6.32.42/net/xfrm/xfrm_user.c
70189 --- linux-2.6.32.42/net/xfrm/xfrm_user.c 2011-03-27 14:31:47.000000000 -0400
70190 +++ linux-2.6.32.42/net/xfrm/xfrm_user.c 2011-05-16 21:46:57.000000000 -0400
70191 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm
70192 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
70193 int i;
70194
70195 + pax_track_stack();
70196 +
70197 if (xp->xfrm_nr == 0)
70198 return 0;
70199
70200 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buf
70201 int err;
70202 int n = 0;
70203
70204 + pax_track_stack();
70205 +
70206 if (attrs[XFRMA_MIGRATE] == NULL)
70207 return -EINVAL;
70208
70209 diff -urNp linux-2.6.32.42/samples/kobject/kset-example.c linux-2.6.32.42/samples/kobject/kset-example.c
70210 --- linux-2.6.32.42/samples/kobject/kset-example.c 2011-03-27 14:31:47.000000000 -0400
70211 +++ linux-2.6.32.42/samples/kobject/kset-example.c 2011-04-17 15:56:46.000000000 -0400
70212 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kob
70213 }
70214
70215 /* Our custom sysfs_ops that we will associate with our ktype later on */
70216 -static struct sysfs_ops foo_sysfs_ops = {
70217 +static const struct sysfs_ops foo_sysfs_ops = {
70218 .show = foo_attr_show,
70219 .store = foo_attr_store,
70220 };
70221 diff -urNp linux-2.6.32.42/scripts/basic/fixdep.c linux-2.6.32.42/scripts/basic/fixdep.c
70222 --- linux-2.6.32.42/scripts/basic/fixdep.c 2011-03-27 14:31:47.000000000 -0400
70223 +++ linux-2.6.32.42/scripts/basic/fixdep.c 2011-04-17 15:56:46.000000000 -0400
70224 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen
70225
70226 static void parse_config_file(char *map, size_t len)
70227 {
70228 - int *end = (int *) (map + len);
70229 + unsigned int *end = (unsigned int *) (map + len);
70230 /* start at +1, so that p can never be < map */
70231 - int *m = (int *) map + 1;
70232 + unsigned int *m = (unsigned int *) map + 1;
70233 char *p, *q;
70234
70235 for (; m < end; m++) {
70236 @@ -371,7 +371,7 @@ static void print_deps(void)
70237 static void traps(void)
70238 {
70239 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
70240 - int *p = (int *)test;
70241 + unsigned int *p = (unsigned int *)test;
70242
70243 if (*p != INT_CONF) {
70244 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
70245 diff -urNp linux-2.6.32.42/scripts/Makefile.build linux-2.6.32.42/scripts/Makefile.build
70246 --- linux-2.6.32.42/scripts/Makefile.build 2011-03-27 14:31:47.000000000 -0400
70247 +++ linux-2.6.32.42/scripts/Makefile.build 2011-06-04 20:46:51.000000000 -0400
70248 @@ -59,7 +59,7 @@ endif
70249 endif
70250
70251 # Do not include host rules unless needed
70252 -ifneq ($(hostprogs-y)$(hostprogs-m),)
70253 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
70254 include scripts/Makefile.host
70255 endif
70256
70257 diff -urNp linux-2.6.32.42/scripts/Makefile.clean linux-2.6.32.42/scripts/Makefile.clean
70258 --- linux-2.6.32.42/scripts/Makefile.clean 2011-03-27 14:31:47.000000000 -0400
70259 +++ linux-2.6.32.42/scripts/Makefile.clean 2011-06-04 20:47:19.000000000 -0400
70260 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
70261 __clean-files := $(extra-y) $(always) \
70262 $(targets) $(clean-files) \
70263 $(host-progs) \
70264 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
70265 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
70266 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
70267
70268 # as clean-files is given relative to the current directory, this adds
70269 # a $(obj) prefix, except for absolute paths
70270 diff -urNp linux-2.6.32.42/scripts/Makefile.host linux-2.6.32.42/scripts/Makefile.host
70271 --- linux-2.6.32.42/scripts/Makefile.host 2011-03-27 14:31:47.000000000 -0400
70272 +++ linux-2.6.32.42/scripts/Makefile.host 2011-06-04 20:48:22.000000000 -0400
70273 @@ -31,6 +31,7 @@
70274 # Note: Shared libraries consisting of C++ files are not supported
70275
70276 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
70277 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
70278
70279 # C code
70280 # Executables compiled from a single .c file
70281 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
70282 # Shared libaries (only .c supported)
70283 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
70284 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
70285 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
70286 # Remove .so files from "xxx-objs"
70287 host-cobjs := $(filter-out %.so,$(host-cobjs))
70288
70289 diff -urNp linux-2.6.32.42/scripts/mod/file2alias.c linux-2.6.32.42/scripts/mod/file2alias.c
70290 --- linux-2.6.32.42/scripts/mod/file2alias.c 2011-03-27 14:31:47.000000000 -0400
70291 +++ linux-2.6.32.42/scripts/mod/file2alias.c 2011-04-17 15:56:46.000000000 -0400
70292 @@ -72,7 +72,7 @@ static void device_id_check(const char *
70293 unsigned long size, unsigned long id_size,
70294 void *symval)
70295 {
70296 - int i;
70297 + unsigned int i;
70298
70299 if (size % id_size || size < id_size) {
70300 if (cross_build != 0)
70301 @@ -102,7 +102,7 @@ static void device_id_check(const char *
70302 /* USB is special because the bcdDevice can be matched against a numeric range */
70303 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
70304 static void do_usb_entry(struct usb_device_id *id,
70305 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
70306 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
70307 unsigned char range_lo, unsigned char range_hi,
70308 struct module *mod)
70309 {
70310 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *sy
70311 for (i = 0; i < count; i++) {
70312 const char *id = (char *)devs[i].id;
70313 char acpi_id[sizeof(devs[0].id)];
70314 - int j;
70315 + unsigned int j;
70316
70317 buf_printf(&mod->dev_table_buf,
70318 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
70319 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *sy
70320
70321 for (j = 0; j < PNP_MAX_DEVICES; j++) {
70322 const char *id = (char *)card->devs[j].id;
70323 - int i2, j2;
70324 + unsigned int i2, j2;
70325 int dup = 0;
70326
70327 if (!id[0])
70328 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *sy
70329 /* add an individual alias for every device entry */
70330 if (!dup) {
70331 char acpi_id[sizeof(card->devs[0].id)];
70332 - int k;
70333 + unsigned int k;
70334
70335 buf_printf(&mod->dev_table_buf,
70336 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
70337 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, co
70338 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
70339 char *alias)
70340 {
70341 - int i, j;
70342 + unsigned int i, j;
70343
70344 sprintf(alias, "dmi*");
70345
70346 diff -urNp linux-2.6.32.42/scripts/mod/modpost.c linux-2.6.32.42/scripts/mod/modpost.c
70347 --- linux-2.6.32.42/scripts/mod/modpost.c 2011-03-27 14:31:47.000000000 -0400
70348 +++ linux-2.6.32.42/scripts/mod/modpost.c 2011-04-17 15:56:46.000000000 -0400
70349 @@ -835,6 +835,7 @@ enum mismatch {
70350 INIT_TO_EXIT,
70351 EXIT_TO_INIT,
70352 EXPORT_TO_INIT_EXIT,
70353 + DATA_TO_TEXT
70354 };
70355
70356 struct sectioncheck {
70357 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[]
70358 .fromsec = { "__ksymtab*", NULL },
70359 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
70360 .mismatch = EXPORT_TO_INIT_EXIT
70361 +},
70362 +/* Do not reference code from writable data */
70363 +{
70364 + .fromsec = { DATA_SECTIONS, NULL },
70365 + .tosec = { TEXT_SECTIONS, NULL },
70366 + .mismatch = DATA_TO_TEXT
70367 }
70368 };
70369
70370 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct e
70371 continue;
70372 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
70373 continue;
70374 - if (sym->st_value == addr)
70375 - return sym;
70376 /* Find a symbol nearby - addr are maybe negative */
70377 d = sym->st_value - addr;
70378 + if (d == 0)
70379 + return sym;
70380 if (d < 0)
70381 d = addr - sym->st_value;
70382 if (d < distance) {
70383 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const ch
70384 "Fix this by removing the %sannotation of %s "
70385 "or drop the export.\n",
70386 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
70387 + case DATA_TO_TEXT:
70388 +/*
70389 + fprintf(stderr,
70390 + "The variable %s references\n"
70391 + "the %s %s%s%s\n",
70392 + fromsym, to, sec2annotation(tosec), tosym, to_p);
70393 +*/
70394 + break;
70395 case NO_MISMATCH:
70396 /* To get warnings on missing members */
70397 break;
70398 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3)
70399 va_end(ap);
70400 }
70401
70402 -void buf_write(struct buffer *buf, const char *s, int len)
70403 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
70404 {
70405 if (buf->size - buf->pos < len) {
70406 buf->size += len + SZ;
70407 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buff
70408 if (fstat(fileno(file), &st) < 0)
70409 goto close_write;
70410
70411 - if (st.st_size != b->pos)
70412 + if (st.st_size != (off_t)b->pos)
70413 goto close_write;
70414
70415 tmp = NOFAIL(malloc(b->pos));
70416 diff -urNp linux-2.6.32.42/scripts/mod/modpost.h linux-2.6.32.42/scripts/mod/modpost.h
70417 --- linux-2.6.32.42/scripts/mod/modpost.h 2011-03-27 14:31:47.000000000 -0400
70418 +++ linux-2.6.32.42/scripts/mod/modpost.h 2011-04-17 15:56:46.000000000 -0400
70419 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
70420
70421 struct buffer {
70422 char *p;
70423 - int pos;
70424 - int size;
70425 + unsigned int pos;
70426 + unsigned int size;
70427 };
70428
70429 void __attribute__((format(printf, 2, 3)))
70430 buf_printf(struct buffer *buf, const char *fmt, ...);
70431
70432 void
70433 -buf_write(struct buffer *buf, const char *s, int len);
70434 +buf_write(struct buffer *buf, const char *s, unsigned int len);
70435
70436 struct module {
70437 struct module *next;
70438 diff -urNp linux-2.6.32.42/scripts/mod/sumversion.c linux-2.6.32.42/scripts/mod/sumversion.c
70439 --- linux-2.6.32.42/scripts/mod/sumversion.c 2011-03-27 14:31:47.000000000 -0400
70440 +++ linux-2.6.32.42/scripts/mod/sumversion.c 2011-04-17 15:56:46.000000000 -0400
70441 @@ -455,7 +455,7 @@ static void write_version(const char *fi
70442 goto out;
70443 }
70444
70445 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
70446 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
70447 warn("writing sum in %s failed: %s\n",
70448 filename, strerror(errno));
70449 goto out;
70450 diff -urNp linux-2.6.32.42/scripts/pnmtologo.c linux-2.6.32.42/scripts/pnmtologo.c
70451 --- linux-2.6.32.42/scripts/pnmtologo.c 2011-03-27 14:31:47.000000000 -0400
70452 +++ linux-2.6.32.42/scripts/pnmtologo.c 2011-04-17 15:56:46.000000000 -0400
70453 @@ -237,14 +237,14 @@ static void write_header(void)
70454 fprintf(out, " * Linux logo %s\n", logoname);
70455 fputs(" */\n\n", out);
70456 fputs("#include <linux/linux_logo.h>\n\n", out);
70457 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
70458 + fprintf(out, "static unsigned char %s_data[] = {\n",
70459 logoname);
70460 }
70461
70462 static void write_footer(void)
70463 {
70464 fputs("\n};\n\n", out);
70465 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
70466 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
70467 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
70468 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
70469 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
70470 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
70471 fputs("\n};\n\n", out);
70472
70473 /* write logo clut */
70474 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
70475 + fprintf(out, "static unsigned char %s_clut[] = {\n",
70476 logoname);
70477 write_hex_cnt = 0;
70478 for (i = 0; i < logo_clutsize; i++) {
70479 diff -urNp linux-2.6.32.42/scripts/tags.sh linux-2.6.32.42/scripts/tags.sh
70480 --- linux-2.6.32.42/scripts/tags.sh 2011-03-27 14:31:47.000000000 -0400
70481 +++ linux-2.6.32.42/scripts/tags.sh 2011-06-07 18:06:04.000000000 -0400
70482 @@ -93,6 +93,11 @@ docscope()
70483 cscope -b -f cscope.out
70484 }
70485
70486 +dogtags()
70487 +{
70488 + all_sources | gtags -f -
70489 +}
70490 +
70491 exuberant()
70492 {
70493 all_sources | xargs $1 -a \
70494 @@ -164,6 +169,10 @@ case "$1" in
70495 docscope
70496 ;;
70497
70498 + "gtags")
70499 + dogtags
70500 + ;;
70501 +
70502 "tags")
70503 rm -f tags
70504 xtags ctags
70505 diff -urNp linux-2.6.32.42/security/capability.c linux-2.6.32.42/security/capability.c
70506 --- linux-2.6.32.42/security/capability.c 2011-03-27 14:31:47.000000000 -0400
70507 +++ linux-2.6.32.42/security/capability.c 2011-04-17 15:56:46.000000000 -0400
70508 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *ls
70509 }
70510 #endif /* CONFIG_AUDIT */
70511
70512 -struct security_operations default_security_ops = {
70513 +struct security_operations default_security_ops __read_only = {
70514 .name = "default",
70515 };
70516
70517 diff -urNp linux-2.6.32.42/security/commoncap.c linux-2.6.32.42/security/commoncap.c
70518 --- linux-2.6.32.42/security/commoncap.c 2011-03-27 14:31:47.000000000 -0400
70519 +++ linux-2.6.32.42/security/commoncap.c 2011-04-17 15:56:46.000000000 -0400
70520 @@ -27,7 +27,7 @@
70521 #include <linux/sched.h>
70522 #include <linux/prctl.h>
70523 #include <linux/securebits.h>
70524 -
70525 +#include <net/sock.h>
70526 /*
70527 * If a non-root user executes a setuid-root binary in
70528 * !secure(SECURE_NOROOT) mode, then we raise capabilities.
70529 @@ -50,9 +50,11 @@ static void warn_setuid_and_fcaps_mixed(
70530 }
70531 }
70532
70533 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
70534 +
70535 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
70536 {
70537 - NETLINK_CB(skb).eff_cap = current_cap();
70538 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
70539 return 0;
70540 }
70541
70542 @@ -582,6 +584,9 @@ int cap_bprm_secureexec(struct linux_bin
70543 {
70544 const struct cred *cred = current_cred();
70545
70546 + if (gr_acl_enable_at_secure())
70547 + return 1;
70548 +
70549 if (cred->uid != 0) {
70550 if (bprm->cap_effective)
70551 return 1;
70552 diff -urNp linux-2.6.32.42/security/integrity/ima/ima_api.c linux-2.6.32.42/security/integrity/ima/ima_api.c
70553 --- linux-2.6.32.42/security/integrity/ima/ima_api.c 2011-03-27 14:31:47.000000000 -0400
70554 +++ linux-2.6.32.42/security/integrity/ima/ima_api.c 2011-04-17 15:56:46.000000000 -0400
70555 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *ino
70556 int result;
70557
70558 /* can overflow, only indicator */
70559 - atomic_long_inc(&ima_htable.violations);
70560 + atomic_long_inc_unchecked(&ima_htable.violations);
70561
70562 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
70563 if (!entry) {
70564 diff -urNp linux-2.6.32.42/security/integrity/ima/ima_fs.c linux-2.6.32.42/security/integrity/ima/ima_fs.c
70565 --- linux-2.6.32.42/security/integrity/ima/ima_fs.c 2011-03-27 14:31:47.000000000 -0400
70566 +++ linux-2.6.32.42/security/integrity/ima/ima_fs.c 2011-04-17 15:56:46.000000000 -0400
70567 @@ -27,12 +27,12 @@
70568 static int valid_policy = 1;
70569 #define TMPBUFLEN 12
70570 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
70571 - loff_t *ppos, atomic_long_t *val)
70572 + loff_t *ppos, atomic_long_unchecked_t *val)
70573 {
70574 char tmpbuf[TMPBUFLEN];
70575 ssize_t len;
70576
70577 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
70578 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
70579 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
70580 }
70581
70582 diff -urNp linux-2.6.32.42/security/integrity/ima/ima.h linux-2.6.32.42/security/integrity/ima/ima.h
70583 --- linux-2.6.32.42/security/integrity/ima/ima.h 2011-03-27 14:31:47.000000000 -0400
70584 +++ linux-2.6.32.42/security/integrity/ima/ima.h 2011-04-17 15:56:46.000000000 -0400
70585 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *ino
70586 extern spinlock_t ima_queue_lock;
70587
70588 struct ima_h_table {
70589 - atomic_long_t len; /* number of stored measurements in the list */
70590 - atomic_long_t violations;
70591 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
70592 + atomic_long_unchecked_t violations;
70593 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
70594 };
70595 extern struct ima_h_table ima_htable;
70596 diff -urNp linux-2.6.32.42/security/integrity/ima/ima_queue.c linux-2.6.32.42/security/integrity/ima/ima_queue.c
70597 --- linux-2.6.32.42/security/integrity/ima/ima_queue.c 2011-03-27 14:31:47.000000000 -0400
70598 +++ linux-2.6.32.42/security/integrity/ima/ima_queue.c 2011-04-17 15:56:46.000000000 -0400
70599 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct i
70600 INIT_LIST_HEAD(&qe->later);
70601 list_add_tail_rcu(&qe->later, &ima_measurements);
70602
70603 - atomic_long_inc(&ima_htable.len);
70604 + atomic_long_inc_unchecked(&ima_htable.len);
70605 key = ima_hash_key(entry->digest);
70606 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
70607 return 0;
70608 diff -urNp linux-2.6.32.42/security/Kconfig linux-2.6.32.42/security/Kconfig
70609 --- linux-2.6.32.42/security/Kconfig 2011-03-27 14:31:47.000000000 -0400
70610 +++ linux-2.6.32.42/security/Kconfig 2011-06-04 20:45:36.000000000 -0400
70611 @@ -4,6 +4,555 @@
70612
70613 menu "Security options"
70614
70615 +source grsecurity/Kconfig
70616 +
70617 +menu "PaX"
70618 +
70619 + config ARCH_TRACK_EXEC_LIMIT
70620 + bool
70621 +
70622 + config PAX_PER_CPU_PGD
70623 + bool
70624 +
70625 + config TASK_SIZE_MAX_SHIFT
70626 + int
70627 + depends on X86_64
70628 + default 47 if !PAX_PER_CPU_PGD
70629 + default 42 if PAX_PER_CPU_PGD
70630 +
70631 + config PAX_ENABLE_PAE
70632 + bool
70633 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
70634 +
70635 +config PAX
70636 + bool "Enable various PaX features"
70637 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
70638 + help
70639 + This allows you to enable various PaX features. PaX adds
70640 + intrusion prevention mechanisms to the kernel that reduce
70641 + the risks posed by exploitable memory corruption bugs.
70642 +
70643 +menu "PaX Control"
70644 + depends on PAX
70645 +
70646 +config PAX_SOFTMODE
70647 + bool 'Support soft mode'
70648 + select PAX_PT_PAX_FLAGS
70649 + help
70650 + Enabling this option will allow you to run PaX in soft mode, that
70651 + is, PaX features will not be enforced by default, only on executables
70652 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
70653 + is the only way to mark executables for soft mode use.
70654 +
70655 + Soft mode can be activated by using the "pax_softmode=1" kernel command
70656 + line option on boot. Furthermore you can control various PaX features
70657 + at runtime via the entries in /proc/sys/kernel/pax.
70658 +
70659 +config PAX_EI_PAX
70660 + bool 'Use legacy ELF header marking'
70661 + help
70662 + Enabling this option will allow you to control PaX features on
70663 + a per executable basis via the 'chpax' utility available at
70664 + http://pax.grsecurity.net/. The control flags will be read from
70665 + an otherwise reserved part of the ELF header. This marking has
70666 + numerous drawbacks (no support for soft-mode, toolchain does not
70667 + know about the non-standard use of the ELF header) therefore it
70668 + has been deprecated in favour of PT_PAX_FLAGS support.
70669 +
70670 + Note that if you enable PT_PAX_FLAGS marking support as well,
70671 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
70672 +
70673 +config PAX_PT_PAX_FLAGS
70674 + bool 'Use ELF program header marking'
70675 + help
70676 + Enabling this option will allow you to control PaX features on
70677 + a per executable basis via the 'paxctl' utility available at
70678 + http://pax.grsecurity.net/. The control flags will be read from
70679 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
70680 + has the benefits of supporting both soft mode and being fully
70681 + integrated into the toolchain (the binutils patch is available
70682 + from http://pax.grsecurity.net).
70683 +
70684 + If your toolchain does not support PT_PAX_FLAGS markings,
70685 + you can create one in most cases with 'paxctl -C'.
70686 +
70687 + Note that if you enable the legacy EI_PAX marking support as well,
70688 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
70689 +
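Editor's note: for a feel of what the paxctl-style marking consists of, the sketch below scans an ELF file's program headers and prints the p_flags of a PT_PAX_FLAGS entry if one is present. It is not part of this patch; it assumes a 64-bit little-endian ELF, and the PT_PAX_FLAGS value (0x65041580) is the constant conventionally used by the PaX userland tools rather than anything defined by this Kconfig text:

/* Illustrative PT_PAX_FLAGS reader; interpretation of the bits is left to
 * paxctl/chpax.  The PT_PAX_FLAGS value below is an assumption. */
#include <elf.h>
#include <stdio.h>

#ifndef PT_PAX_FLAGS
#define PT_PAX_FLAGS 0x65041580UL
#endif

int main(int argc, char **argv)
{
	Elf64_Ehdr eh;
	Elf64_Phdr ph;
	FILE *f;
	int i;

	if (argc != 2)
		return 1;
	f = fopen(argv[1], "rb");
	if (!f || fread(&eh, sizeof(eh), 1, f) != 1)
		return 1;
	for (i = 0; i < eh.e_phnum; i++) {
		if (fseek(f, (long)(eh.e_phoff + (Elf64_Off)i * eh.e_phentsize), SEEK_SET) ||
		    fread(&ph, sizeof(ph), 1, f) != 1)
			break;
		if (ph.p_type == PT_PAX_FLAGS)
			printf("PT_PAX_FLAGS p_flags = 0x%x\n", (unsigned int)ph.p_flags);
	}
	fclose(f);
	return 0;
}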
70690 +choice
70691 + prompt 'MAC system integration'
70692 + default PAX_HAVE_ACL_FLAGS
70693 + help
70694 + Mandatory Access Control systems have the option of controlling
70695 + PaX flags on a per executable basis, choose the method supported
70696 + by your particular system.
70697 +
70698 + - "none": if your MAC system does not interact with PaX,
70699 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
70700 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
70701 +
70702 + NOTE: this option is for developers/integrators only.
70703 +
70704 + config PAX_NO_ACL_FLAGS
70705 + bool 'none'
70706 +
70707 + config PAX_HAVE_ACL_FLAGS
70708 + bool 'direct'
70709 +
70710 + config PAX_HOOK_ACL_FLAGS
70711 + bool 'hook'
70712 +endchoice
70713 +
70714 +endmenu
70715 +
70716 +menu "Non-executable pages"
70717 + depends on PAX
70718 +
70719 +config PAX_NOEXEC
70720 + bool "Enforce non-executable pages"
70721 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
70722 + help
70723 + By design some architectures do not allow for protecting memory
70724 + pages against execution, or even if they do, Linux does not make
70725 + use of this feature. In practice this means that if a page is
70726 + readable (such as the stack or heap) it is also executable.
70727 +
70728 + There is a well known exploit technique that makes use of this
70729 + fact and a common programming mistake where an attacker can
70730 + introduce code of his choice somewhere in the attacked program's
70731 + memory (typically the stack or the heap) and then execute it.
70732 +
70733 + If the attacked program was running with different (typically
70734 + higher) privileges than those of the attacker, then he can elevate
70735 + his own privilege level (e.g. get a root shell, write to files for
70736 + which he does not have write access, etc).
70737 +
70738 + Enabling this option will let you choose from various features
70739 + that prevent the injection and execution of 'foreign' code in
70740 + a program.
70741 +
70742 + This will also break programs that rely on the old behaviour and
70743 + expect that dynamically allocated memory via the malloc() family
70744 + of functions is executable (which it is not). Notable examples
70745 + are the XFree86 4.x server, the java runtime and wine.
70746 +
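Editor's note: a quick way to see from userland what this changes is to copy an instruction into a plain read/write anonymous mapping and call into it. The sketch below is x86/x86-64 only (0xc3 is 'ret') and is merely illustrative; under PAGEEXEC/SEGMEXEC the indirect call is expected to be killed with SIGSEGV:

/* x86/x86-64 only demonstration: calling into a rw- anonymous mapping should
 * fault when non-executable pages are enforced; on a lax setup it "works". */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	unsigned char code[] = { 0xc3 };		/* x86 'ret' */
	void (*fn)(void);
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	memcpy(p, code, sizeof(code));
	fn = (void (*)(void))p;
	puts("calling into a rw- page...");
	fn();						/* SIGSEGV expected under NOEXEC */
	puts("the page was executable after all");
	return 0;
}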
70747 +config PAX_PAGEEXEC
70748 + bool "Paging based non-executable pages"
70749 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
70750 + select S390_SWITCH_AMODE if S390
70751 + select S390_EXEC_PROTECT if S390
70752 + select ARCH_TRACK_EXEC_LIMIT if X86_32
70753 + help
70754 + This implementation is based on the paging feature of the CPU.
70755 + On i386 without hardware non-executable bit support there is a
70756 + variable but usually low performance impact, however on Intel's
70757 + P4 core based CPUs it is very high so you should not enable this
70758 + for kernels meant to be used on such CPUs.
70759 +
70760 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
70761 + with hardware non-executable bit support there is no performance
70762 + impact, on ppc the impact is negligible.
70763 +
70764 + Note that several architectures require various emulations due to
70765 + badly designed userland ABIs; this will cause a performance impact
70766 + but will disappear as soon as userland is fixed. For example, ppc
70767 + userland MUST have been built with secure-plt by a recent toolchain.
70768 +
70769 +config PAX_SEGMEXEC
70770 + bool "Segmentation based non-executable pages"
70771 + depends on PAX_NOEXEC && X86_32
70772 + help
70773 + This implementation is based on the segmentation feature of the
70774 + CPU and has a very small performance impact, however applications
70775 + will be limited to a 1.5 GB address space instead of the normal
70776 + 3 GB.
70777 +
70778 +config PAX_EMUTRAMP
70779 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
70780 + default y if PARISC
70781 + help
70782 + There are some programs and libraries that for one reason or
70783 + another attempt to execute special small code snippets from
70784 + non-executable memory pages. Most notable examples are the
70785 + signal handler return code generated by the kernel itself and
70786 + the GCC trampolines.
70787 +
70788 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
70789 + such programs will no longer work under your kernel.
70790 +
70791 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
70792 + utilities to enable trampoline emulation for the affected programs
70793 + yet still have the protection provided by the non-executable pages.
70794 +
70795 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
70796 + your system will not even boot.
70797 +
70798 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
70799 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
70800 + for the affected files.
70801 +
70802 + NOTE: enabling this feature *may* open up a loophole in the
70803 + protection provided by non-executable pages that an attacker
70804 + could abuse. Therefore the best solution is to not have any
70805 + files on your system that would require this option. This can
70806 + be achieved by not using libc5 (which relies on the kernel
70807 + signal handler return code) and not using or rewriting programs
70808 + that make use of the nested function implementation of GCC.
70809 + Skilled users can just fix GCC itself so that it implements
70810 + nested function calls in a way that does not interfere with PaX.
70811 +
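Editor's note: the GCC trampolines mentioned above come from the nested-function extension. Taking the address of a nested function makes gcc materialise a small thunk in stack memory at run time, which a non-executable stack would otherwise refuse to run unless EMUTRAMP steps in. A minimal example of code that generates such a trampoline (illustrative, not from this patch):

/* GNU C nested functions (a gcc extension) are one of the trampoline users
 * described above. */
#include <stdio.h>

static void run(void (*cb)(void))
{
	cb();
}

int main(void)
{
	int hits = 0;

	void bump(void)			/* nested function, closes over 'hits' */
	{
		hits++;
	}

	run(bump);			/* passing its address forces a stack trampoline */
	printf("hits = %d\n", hits);
	return 0;
}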
70812 +config PAX_EMUSIGRT
70813 + bool "Automatically emulate sigreturn trampolines"
70814 + depends on PAX_EMUTRAMP && PARISC
70815 + default y
70816 + help
70817 + Enabling this option will have the kernel automatically detect
70818 + and emulate signal return trampolines executing on the stack
70819 + that would otherwise lead to task termination.
70820 +
70821 + This solution is intended as a temporary one for users with
70822 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
70823 + Modula-3 runtime, etc) or executables linked to such, basically
70824 + everything that does not specify its own SA_RESTORER function in
70825 + normal executable memory like glibc 2.1+ does.
70826 +
70827 + On parisc you MUST enable this option, otherwise your system will
70828 + not even boot.
70829 +
70830 + NOTE: this feature cannot be disabled on a per executable basis
70831 + and since it *does* open up a loophole in the protection provided
70832 + by non-executable pages, the best solution is to not have any
70833 + files on your system that would require this option.
70834 +
70835 +config PAX_MPROTECT
70836 + bool "Restrict mprotect()"
70837 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
70838 + help
70839 + Enabling this option will prevent programs from
70840 + - changing the executable status of memory pages that were
70841 + not originally created as executable,
70842 + - making read-only executable pages writable again,
70843 + - creating executable pages from anonymous memory,
70844 + - making read-only-after-relocations (RELRO) data pages writable again.
70845 +
70846 + You should say Y here to complete the protection provided by
70847 + the enforcement of non-executable pages.
70848 +
70849 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
70850 + this feature on a per file basis.
70851 +
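Editor's note: to observe the third restriction listed above (no executable pages from anonymous memory), the sketch below maps an anonymous rw- region and then asks for PROT_EXEC; under PAX_MPROTECT the mprotect() call is expected to fail. The exact errno is an assumption here, check your own kernel:

/* Illustrative probe: request PROT_EXEC on formerly anonymous rw- memory. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	if (mprotect(p, 4096, PROT_READ | PROT_EXEC) != 0)
		printf("mprotect(PROT_EXEC) refused: %s\n", strerror(errno));
	else
		printf("mprotect(PROT_EXEC) allowed\n");
	munmap(p, 4096);
	return 0;
}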
70852 +config PAX_MPROTECT_COMPAT
70853 + bool "Use legacy/compat protection demoting (read help)"
70854 + depends on PAX_MPROTECT
70855 + default n
70856 + help
70857 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
70858 + by sending the proper error code to the application. For some broken
70859 + userland, this can cause problems with Python or other applications. The
70860 + current implementation however allows for applications like clamav to
70861 + detect if JIT compilation/execution is allowed and to fall back gracefully
70862 + to an interpreter-based mode if it is not. While we encourage everyone
70863 + to use the current implementation as-is and push upstream to fix broken
70864 + userland (note that the RWX logging option can assist with this), in some
70865 + environments this may not be possible. Having to disable MPROTECT
70866 + completely on certain binaries reduces the security benefit of PaX,
70867 + so this option is provided for those environments to revert to the old
70868 + behavior.
70869 +
70870 +config PAX_ELFRELOCS
70871 + bool "Allow ELF text relocations (read help)"
70872 + depends on PAX_MPROTECT
70873 + default n
70874 + help
70875 + Non-executable pages and mprotect() restrictions are effective
70876 + in preventing the introduction of new executable code into an
70877 + attacked task's address space. There remain only two venues
70878 + for this kind of attack: if the attacker can execute already
70879 + existing code in the attacked task then he can either have it
70880 + create and mmap() a file containing his code or have it mmap()
70881 + an already existing ELF library that does not have position
70882 + independent code in it and use mprotect() on it to make it
70883 + writable and copy his code there. While protecting against
70884 + the former approach is beyond PaX, the latter can be prevented
70885 + by having only PIC ELF libraries on one's system (which do not
70886 + need to relocate their code). If you are sure this is your case,
70887 + as is the case with all modern Linux distributions, then leave
70888 + this option disabled. You should say 'n' here.
70889 +
70890 +config PAX_ETEXECRELOCS
70891 + bool "Allow ELF ET_EXEC text relocations"
70892 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
70893 + select PAX_ELFRELOCS
70894 + default y
70895 + help
70896 + On some architectures there are incorrectly created applications
70897 + that require text relocations and would not work without enabling
70898 + this option. If you are an alpha, ia64 or parisc user, you should
70899 + enable this option and disable it once you have made sure that
70900 + none of your applications need it.
70901 +
70902 +config PAX_EMUPLT
70903 + bool "Automatically emulate ELF PLT"
70904 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
70905 + default y
70906 + help
70907 + Enabling this option will have the kernel automatically detect
70908 + and emulate the Procedure Linkage Table entries in ELF files.
70909 + On some architectures such entries are in writable memory, and
70910 + thus become non-executable, leading to task termination. Therefore
70911 + it is mandatory that you enable this option on alpha, parisc,
70912 + sparc and sparc64, otherwise your system would not even boot.
70913 +
70914 + NOTE: this feature *does* open up a loophole in the protection
70915 + provided by the non-executable pages, therefore the proper
70916 + solution is to modify the toolchain to produce a PLT that does
70917 + not need to be writable.
70918 +
70919 +config PAX_DLRESOLVE
70920 + bool 'Emulate old glibc resolver stub'
70921 + depends on PAX_EMUPLT && SPARC
70922 + default n
70923 + help
70924 + This option is needed if userland has an old glibc (before 2.4)
70925 + that puts a 'save' instruction into the runtime generated resolver
70926 + stub that needs special emulation.
70927 +
70928 +config PAX_KERNEXEC
70929 + bool "Enforce non-executable kernel pages"
70930 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
70931 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
70932 + help
70933 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
70934 + that is, enabling this option will make it harder to inject
70935 + and execute 'foreign' code in kernel memory itself.
70936 +
70937 + Note that on x86_64 kernels there is a known regression when
70938 + this feature and KVM/VMX are both enabled in the host kernel.
70939 +
70940 +config PAX_KERNEXEC_MODULE_TEXT
70941 + int "Minimum amount of memory reserved for module code"
70942 + default "4"
70943 + depends on PAX_KERNEXEC && X86_32 && MODULES
70944 + help
70945 + Due to implementation details the kernel must reserve a fixed
70946 + amount of memory for module code at compile time that cannot be
70947 + changed at runtime. Here you can specify the minimum amount
70948 + in MB that will be reserved. Due to the same implementation
70949 + details this size will always be rounded up to the next 2/4 MB
70950 + boundary (depends on PAE) so the actually available memory for
70951 + module code will usually be more than this minimum.
70952 +
70953 + The default 4 MB should be enough for most users but if you have
70954 + an excessive number of modules (e.g., most distribution configs
70955 + compile many drivers as modules) or use huge modules such as
70956 + nvidia's kernel driver, you will need to adjust this amount.
70957 + A good rule of thumb is to look at your currently loaded kernel
70958 + modules and add up their sizes.
70959 +
70960 +endmenu
70961 +
70962 +menu "Address Space Layout Randomization"
70963 + depends on PAX
70964 +
70965 +config PAX_ASLR
70966 + bool "Address Space Layout Randomization"
70967 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
70968 + help
70969 + Many if not most exploit techniques rely on the knowledge of
70970 + certain addresses in the attacked program. The following options
70971 + will allow the kernel to apply a certain amount of randomization
70972 + to specific parts of the program thereby forcing an attacker to
70973 + guess them in most cases. Any failed guess will most likely crash
70974 + the attacked program which allows the kernel to detect such attempts
70975 + and react to them. PaX itself provides no reaction mechanisms,
70976 + instead it is strongly encouraged that you make use of Nergal's
70977 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
70978 + (http://www.grsecurity.net/) built-in crash detection features or
70979 + develop one yourself.
70980 +
70981 + By saying Y here you can choose to randomize the following areas:
70982 + - top of the task's kernel stack
70983 + - top of the task's userland stack
70984 + - base address for mmap() requests that do not specify one
70985 + (this includes all libraries)
70986 + - base address of the main executable
70987 +
70988 + It is strongly recommended to say Y here as address space layout
70989 + randomization has negligible impact on performance yet it provides
70990 + a very effective protection.
70991 +
70992 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
70993 + this feature on a per file basis.
70994 +
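Editor's note: a simple way to see the randomized areas listed above is to print a few representative addresses and run the program several times; with ASLR plus RANDUSTACK/RANDMMAP active they should differ between runs. Illustrative sketch, not part of the patch:

/* Print representative addresses; compare output across runs.  The main()
 * address only moves for ET_DYN/PIE builds, per the RANDMMAP help below. */
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	int on_stack;
	void *on_heap = malloc(16);
	void *mapped = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	printf("main  : %p\n", (void *)main);
	printf("stack : %p\n", (void *)&on_stack);
	printf("heap  : %p\n", on_heap);
	printf("mmap  : %p\n", mapped);
	free(on_heap);
	return 0;
}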
70995 +config PAX_RANDKSTACK
70996 + bool "Randomize kernel stack base"
70997 + depends on PAX_ASLR && X86_TSC && X86
70998 + help
70999 + By saying Y here the kernel will randomize every task's kernel
71000 + stack on every system call. This will not only force an attacker
71001 + to guess it but also prevent him from making use of possible
71002 + leaked information about it.
71003 +
71004 + Since the kernel stack is a rather scarce resource, randomization
71005 + may cause unexpected stack overflows, therefore you should very
71006 + carefully test your system. Note that once enabled in the kernel
71007 + configuration, this feature cannot be disabled on a per file basis.
71008 +
71009 +config PAX_RANDUSTACK
71010 + bool "Randomize user stack base"
71011 + depends on PAX_ASLR
71012 + help
71013 + By saying Y here the kernel will randomize every task's userland
71014 + stack. The randomization is done in two steps where the second
71015 + one may apply a large shift to the top of the stack and
71016 + cause problems for programs that want to use lots of memory (more
71017 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
71018 + For this reason the second step can be controlled by 'chpax' or
71019 + 'paxctl' on a per file basis.
71020 +
71021 +config PAX_RANDMMAP
71022 + bool "Randomize mmap() base"
71023 + depends on PAX_ASLR
71024 + help
71025 + By saying Y here the kernel will use a randomized base address for
71026 + mmap() requests that do not specify one themselves. As a result
71027 + all dynamically loaded libraries will appear at random addresses
71028 + and therefore be harder to exploit by a technique where an attacker
71029 + attempts to execute library code for his purposes (e.g. spawn a
71030 + shell from an exploited program that is running at an elevated
71031 + privilege level).
71032 +
71033 + Furthermore, if a program is relinked as a dynamic ELF file, its
71034 + base address will be randomized as well, completing the full
71035 + randomization of the address space layout. Attacking such programs
71036 + becomes a guessing game. You can find an example of doing this at
71037 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
71038 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
71039 +
71040 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
71041 + feature on a per file basis.
71042 +
71043 +endmenu
71044 +
71045 +menu "Miscellaneous hardening features"
71046 +
71047 +config PAX_MEMORY_SANITIZE
71048 + bool "Sanitize all freed memory"
71049 + help
71050 + By saying Y here the kernel will erase memory pages as soon as they
71051 + are freed. This in turn reduces the lifetime of data stored in the
71052 + pages, making it less likely that sensitive information such as
71053 + passwords, cryptographic secrets, etc. stays in memory for too long.
71054 +
71055 + This is especially useful for programs whose runtime is short; long
71056 + lived processes and the kernel itself benefit from this as long as
71057 + they operate on whole memory pages and ensure timely freeing of pages
71058 + that may hold sensitive information.
71059 +
71060 + The tradeoff is a performance impact: on a single CPU system kernel
71061 + compilation sees a 3% slowdown; other systems and workloads may vary,
71062 + and you are advised to test this feature on your expected workload
71063 + before deploying it.
71064 +
71065 + Note that this feature does not protect data stored in live pages,
71066 + e.g., process memory swapped to disk may stay there for a long time.
71067 +
71068 +config PAX_MEMORY_STACKLEAK
71069 + bool "Sanitize kernel stack"
71070 + depends on X86
71071 + help
71072 + By saying Y here the kernel will erase the kernel stack before it
71073 + returns from a system call. This in turn reduces the information
71074 + that a kernel stack leak bug can reveal.
71075 +
71076 + Note that such a bug can still leak information that was put on
71077 + the stack by the current system call (the one eventually triggering
71078 + the bug) but traces of earlier system calls on the kernel stack
71079 + cannot leak anymore.
71080 +
71081 + The tradeoff is a performance impact: on a single CPU system kernel
71082 + compilation sees a 1% slowdown; other systems and workloads may vary,
71083 + and you are advised to test this feature on your expected workload
71084 + before deploying it.
71085 +
71086 + Note: full support for this feature requires gcc with plugin support
71087 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
71088 + is not supported). Using older gcc versions means that functions
71089 + with large enough stack frames may leave uninitialized memory behind
71090 + that may be exposed to a later syscall leaking the stack.
71091 +
71092 +config PAX_MEMORY_UDEREF
71093 + bool "Prevent invalid userland pointer dereference"
71094 + depends on X86 && !UML_X86 && !XEN
71095 + select PAX_PER_CPU_PGD if X86_64
71096 + help
71097 + By saying Y here the kernel will be prevented from dereferencing
71098 + userland pointers in contexts where the kernel expects only kernel
71099 + pointers. This is both a useful runtime debugging feature and a
71100 + security measure that prevents exploiting a class of kernel bugs.
71101 +
71102 + The tradeoff is that some virtualization solutions may experience
71103 + a huge slowdown and therefore you should not enable this feature
71104 + for kernels meant to run in such environments. Whether a given VM
71105 + solution is affected or not is best determined by simply trying it
71106 + out, the performance impact will be obvious right on boot as this
71107 + mechanism engages from very early on. A good rule of thumb is that
71108 + VMs running on CPUs without hardware virtualization support (i.e.,
71109 + the majority of IA-32 CPUs) will likely experience the slowdown.
71110 +
71111 +config PAX_REFCOUNT
71112 + bool "Prevent various kernel object reference counter overflows"
71113 + depends on GRKERNSEC && (X86 || SPARC64)
71114 + help
71115 + By saying Y here the kernel will detect and prevent overflowing
71116 + various (but not all) kinds of object reference counters. Such
71117 + overflows can normally occur due to bugs only and are often, if
71118 + not always, exploitable.
71119 +
71120 + The tradeoff is that data structures protected by an overflowed
71121 + refcount will never be freed and therefore will leak memory. Note
71122 + that this leak also happens even without this protection but in
71123 + that case the overflow can eventually trigger the freeing of the
71124 + data structure while it is still being used elsewhere, resulting
71125 + in the exploitable situation that this feature prevents.
71126 +
71127 + Since this has a negligible performance impact, you should enable
71128 + this feature.
71129 +
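Editor's note: the leak-instead-of-overflow tradeoff described above can be illustrated with a trivial userspace counter: detect the increment that would wrap and pin the counter (leaking the object) instead of letting it wrap. This is only a sketch of the idea, not how PAX_REFCOUNT is implemented in the patch:

/* Sketch: refuse the wrapping increment and leak the object rather than
 * allow a premature free. */
#include <limits.h>
#include <stdio.h>

static int refcount_inc_checked(int *count)
{
	if (*count == INT_MAX) {
		fprintf(stderr, "refcount saturated, object will be leaked\n");
		return -1;		/* pinned: never freed, never overflowed */
	}
	(*count)++;
	return 0;
}

int main(void)
{
	int refs = INT_MAX - 1;

	refcount_inc_checked(&refs);	/* fine, reaches INT_MAX */
	refcount_inc_checked(&refs);	/* would wrap: detected and refused */
	printf("refs = %d\n", refs);
	return 0;
}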
71130 +config PAX_USERCOPY
71131 + bool "Harden heap object copies between kernel and userland"
71132 + depends on X86 || PPC || SPARC
71133 + depends on GRKERNSEC && (SLAB || SLUB)
71134 + help
71135 + By saying Y here the kernel will enforce the size of heap objects
71136 + when they are copied in either direction between the kernel and
71137 + userland, even if only a part of the heap object is copied.
71138 +
71139 + Specifically, this checking prevents information leaking from the
71140 + kernel heap during kernel to userland copies (if the kernel heap
71141 + object is otherwise fully initialized) and prevents kernel heap
71142 + overflows during userland to kernel copies.
71143 +
71144 + Note that the current implementation provides the strictest bounds
71145 + checks for the SLUB allocator.
71146 +
71147 + Enabling this option also enables per-slab cache protection against
71148 + data in a given cache being copied into or out of userland via the
71149 + usercopy accessors. Though the whitelist of regions will be reduced over
71150 + time, it notably protects important data structures like task structs.
71151 +
71152 +
71153 + If frame pointers are enabled on x86, this option will also
71154 + restrict copies into and out of the kernel stack to local variables
71155 + within a single frame.
71156 +
71157 + Since this has a negligible performance impact, you should enable
71158 + this feature.
71159 +
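Editor's note: the heap-object size enforcement described above boils down to a bounds check against the containing object before the copy is performed. The userspace sketch below mimics that check with a made-up object layout; the names and structure are hypothetical and only the shape of the check matters:

/* Shape of the USERCOPY check: refuse the copy if it would run past the end
 * of the containing object.  Entirely illustrative. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct tracked_object {
	size_t size;			/* allocated payload size */
	unsigned char data[];
};

static int checked_copy(void *dst, const struct tracked_object *obj,
			size_t offset, size_t len)
{
	if (offset > obj->size || len > obj->size - offset)
		return -1;		/* would read past the object: refuse */
	memcpy(dst, obj->data + offset, len);
	return 0;
}

int main(void)
{
	struct tracked_object *obj = malloc(sizeof(*obj) + 32);
	unsigned char out[64];

	obj->size = 32;
	memset(obj->data, 0xab, 32);
	printf("copy of 16 bytes: %d\n", checked_copy(out, obj, 0, 16));
	printf("copy of 64 bytes: %d\n", checked_copy(out, obj, 0, 64));
	free(obj);
	return 0;
}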
71160 +endmenu
71161 +
71162 +endmenu
71163 +
71164 config KEYS
71165 bool "Enable access key retention support"
71166 help
71167 @@ -146,7 +695,7 @@ config INTEL_TXT
71168 config LSM_MMAP_MIN_ADDR
71169 int "Low address space for LSM to protect from user allocation"
71170 depends on SECURITY && SECURITY_SELINUX
71171 - default 65536
71172 + default 32768
71173 help
71174 This is the portion of low virtual memory which should be protected
71175 from userspace allocation. Keeping a user from writing to low pages
71176 diff -urNp linux-2.6.32.42/security/keys/keyring.c linux-2.6.32.42/security/keys/keyring.c
71177 --- linux-2.6.32.42/security/keys/keyring.c 2011-03-27 14:31:47.000000000 -0400
71178 +++ linux-2.6.32.42/security/keys/keyring.c 2011-04-18 22:03:00.000000000 -0400
71179 @@ -214,15 +214,15 @@ static long keyring_read(const struct ke
71180 ret = -EFAULT;
71181
71182 for (loop = 0; loop < klist->nkeys; loop++) {
71183 + key_serial_t serial;
71184 key = klist->keys[loop];
71185 + serial = key->serial;
71186
71187 tmp = sizeof(key_serial_t);
71188 if (tmp > buflen)
71189 tmp = buflen;
71190
71191 - if (copy_to_user(buffer,
71192 - &key->serial,
71193 - tmp) != 0)
71194 + if (copy_to_user(buffer, &serial, tmp))
71195 goto error;
71196
71197 buflen -= tmp;
71198 diff -urNp linux-2.6.32.42/security/min_addr.c linux-2.6.32.42/security/min_addr.c
71199 --- linux-2.6.32.42/security/min_addr.c 2011-03-27 14:31:47.000000000 -0400
71200 +++ linux-2.6.32.42/security/min_addr.c 2011-04-17 15:56:46.000000000 -0400
71201 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
71202 */
71203 static void update_mmap_min_addr(void)
71204 {
71205 +#ifndef SPARC
71206 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
71207 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
71208 mmap_min_addr = dac_mmap_min_addr;
71209 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
71210 #else
71211 mmap_min_addr = dac_mmap_min_addr;
71212 #endif
71213 +#endif
71214 }
71215
71216 /*
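Editor's note: the LSM_MMAP_MIN_ADDR default change and the min_addr.c hunk above both concern the low-address mapping floor. Whether a floor is in effect can be probed from userland with a fixed low mapping, as in this illustrative sketch (expected to be refused for unprivileged users when a floor applies):

/* Probe the low-address mapping floor with a MAP_FIXED request at 4096. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap((void *)4096, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (p == MAP_FAILED)
		printf("low mapping refused: %s\n", strerror(errno));
	else
		printf("mapped at %p\n", p);
	return 0;
}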
71217 diff -urNp linux-2.6.32.42/security/root_plug.c linux-2.6.32.42/security/root_plug.c
71218 --- linux-2.6.32.42/security/root_plug.c 2011-03-27 14:31:47.000000000 -0400
71219 +++ linux-2.6.32.42/security/root_plug.c 2011-04-17 15:56:46.000000000 -0400
71220 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security
71221 return 0;
71222 }
71223
71224 -static struct security_operations rootplug_security_ops = {
71225 +static struct security_operations rootplug_security_ops __read_only = {
71226 .bprm_check_security = rootplug_bprm_check_security,
71227 };
71228
71229 diff -urNp linux-2.6.32.42/security/security.c linux-2.6.32.42/security/security.c
71230 --- linux-2.6.32.42/security/security.c 2011-03-27 14:31:47.000000000 -0400
71231 +++ linux-2.6.32.42/security/security.c 2011-04-17 15:56:46.000000000 -0400
71232 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURI
71233 extern struct security_operations default_security_ops;
71234 extern void security_fixup_ops(struct security_operations *ops);
71235
71236 -struct security_operations *security_ops; /* Initialized to NULL */
71237 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
71238
71239 static inline int verify(struct security_operations *ops)
71240 {
71241 @@ -106,7 +106,7 @@ int __init security_module_enable(struct
71242 * If there is already a security module registered with the kernel,
71243 * an error will be returned. Otherwise %0 is returned on success.
71244 */
71245 -int register_security(struct security_operations *ops)
71246 +int __init register_security(struct security_operations *ops)
71247 {
71248 if (verify(ops)) {
71249 printk(KERN_DEBUG "%s could not verify "
71250 diff -urNp linux-2.6.32.42/security/selinux/hooks.c linux-2.6.32.42/security/selinux/hooks.c
71251 --- linux-2.6.32.42/security/selinux/hooks.c 2011-03-27 14:31:47.000000000 -0400
71252 +++ linux-2.6.32.42/security/selinux/hooks.c 2011-04-17 15:56:46.000000000 -0400
71253 @@ -131,7 +131,7 @@ int selinux_enabled = 1;
71254 * Minimal support for a secondary security module,
71255 * just to allow the use of the capability module.
71256 */
71257 -static struct security_operations *secondary_ops;
71258 +static struct security_operations *secondary_ops __read_only;
71259
71260 /* Lists of inode and superblock security structures initialized
71261 before the policy was loaded. */
71262 @@ -5457,7 +5457,7 @@ static int selinux_key_getsecurity(struc
71263
71264 #endif
71265
71266 -static struct security_operations selinux_ops = {
71267 +static struct security_operations selinux_ops __read_only = {
71268 .name = "selinux",
71269
71270 .ptrace_access_check = selinux_ptrace_access_check,
71271 @@ -5841,7 +5841,9 @@ int selinux_disable(void)
71272 avc_disable();
71273
71274 /* Reset security_ops to the secondary module, dummy or capability. */
71275 + pax_open_kernel();
71276 security_ops = secondary_ops;
71277 + pax_close_kernel();
71278
71279 /* Unregister netfilter hooks. */
71280 selinux_nf_ip_exit();
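Editor's note: several hunks in this area mark security_operations tables and pointers __read_only and bracket the one legitimate update with pax_open_kernel()/pax_close_kernel(). A userspace analogue of that pattern, keeping a function-pointer table on a read-only page and flipping protection only around the update, looks roughly like this (purely illustrative, not how the kernel side is implemented):

/* Userspace analogue of __read_only plus pax_open_kernel()/pax_close_kernel(). */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void hook_a(void) { puts("hook_a"); }
static void hook_b(void) { puts("hook_b"); }

int main(void)
{
	long pg = sysconf(_SC_PAGESIZE);
	void (**ops)(void) = mmap(NULL, pg, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (ops == MAP_FAILED)
		return 1;
	ops[0] = hook_a;
	mprotect(ops, pg, PROT_READ);			/* "__read_only" from here on */

	mprotect(ops, pg, PROT_READ | PROT_WRITE);	/* pax_open_kernel() analogue */
	ops[0] = hook_b;				/* the one sanctioned update */
	mprotect(ops, pg, PROT_READ);			/* pax_close_kernel() analogue */

	ops[0]();
	return 0;
}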
71281 diff -urNp linux-2.6.32.42/security/selinux/include/xfrm.h linux-2.6.32.42/security/selinux/include/xfrm.h
71282 --- linux-2.6.32.42/security/selinux/include/xfrm.h 2011-03-27 14:31:47.000000000 -0400
71283 +++ linux-2.6.32.42/security/selinux/include/xfrm.h 2011-05-18 20:09:37.000000000 -0400
71284 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
71285
71286 static inline void selinux_xfrm_notify_policyload(void)
71287 {
71288 - atomic_inc(&flow_cache_genid);
71289 + atomic_inc_unchecked(&flow_cache_genid);
71290 }
71291 #else
71292 static inline int selinux_xfrm_enabled(void)
71293 diff -urNp linux-2.6.32.42/security/selinux/ss/services.c linux-2.6.32.42/security/selinux/ss/services.c
71294 --- linux-2.6.32.42/security/selinux/ss/services.c 2011-03-27 14:31:47.000000000 -0400
71295 +++ linux-2.6.32.42/security/selinux/ss/services.c 2011-05-16 21:46:57.000000000 -0400
71296 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, siz
71297 int rc = 0;
71298 struct policy_file file = { data, len }, *fp = &file;
71299
71300 + pax_track_stack();
71301 +
71302 if (!ss_initialized) {
71303 avtab_cache_init();
71304 if (policydb_read(&policydb, fp)) {
71305 diff -urNp linux-2.6.32.42/security/smack/smack_lsm.c linux-2.6.32.42/security/smack/smack_lsm.c
71306 --- linux-2.6.32.42/security/smack/smack_lsm.c 2011-03-27 14:31:47.000000000 -0400
71307 +++ linux-2.6.32.42/security/smack/smack_lsm.c 2011-04-17 15:56:46.000000000 -0400
71308 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct
71309 return 0;
71310 }
71311
71312 -struct security_operations smack_ops = {
71313 +struct security_operations smack_ops __read_only = {
71314 .name = "smack",
71315
71316 .ptrace_access_check = smack_ptrace_access_check,
71317 diff -urNp linux-2.6.32.42/security/tomoyo/tomoyo.c linux-2.6.32.42/security/tomoyo/tomoyo.c
71318 --- linux-2.6.32.42/security/tomoyo/tomoyo.c 2011-03-27 14:31:47.000000000 -0400
71319 +++ linux-2.6.32.42/security/tomoyo/tomoyo.c 2011-04-17 15:56:46.000000000 -0400
71320 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct fil
71321 * tomoyo_security_ops is a "struct security_operations" which is used for
71322 * registering TOMOYO.
71323 */
71324 -static struct security_operations tomoyo_security_ops = {
71325 +static struct security_operations tomoyo_security_ops __read_only = {
71326 .name = "tomoyo",
71327 .cred_alloc_blank = tomoyo_cred_alloc_blank,
71328 .cred_prepare = tomoyo_cred_prepare,
71329 diff -urNp linux-2.6.32.42/sound/aoa/codecs/onyx.c linux-2.6.32.42/sound/aoa/codecs/onyx.c
71330 --- linux-2.6.32.42/sound/aoa/codecs/onyx.c 2011-03-27 14:31:47.000000000 -0400
71331 +++ linux-2.6.32.42/sound/aoa/codecs/onyx.c 2011-04-17 15:56:46.000000000 -0400
71332 @@ -53,7 +53,7 @@ struct onyx {
71333 spdif_locked:1,
71334 analog_locked:1,
71335 original_mute:2;
71336 - int open_count;
71337 + local_t open_count;
71338 struct codec_info *codec_info;
71339
71340 /* mutex serializes concurrent access to the device
71341 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_i
71342 struct onyx *onyx = cii->codec_data;
71343
71344 mutex_lock(&onyx->mutex);
71345 - onyx->open_count++;
71346 + local_inc(&onyx->open_count);
71347 mutex_unlock(&onyx->mutex);
71348
71349 return 0;
71350 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_
71351 struct onyx *onyx = cii->codec_data;
71352
71353 mutex_lock(&onyx->mutex);
71354 - onyx->open_count--;
71355 - if (!onyx->open_count)
71356 + if (local_dec_and_test(&onyx->open_count))
71357 onyx->spdif_locked = onyx->analog_locked = 0;
71358 mutex_unlock(&onyx->mutex);
71359
71360 diff -urNp linux-2.6.32.42/sound/aoa/codecs/onyx.h linux-2.6.32.42/sound/aoa/codecs/onyx.h
71361 --- linux-2.6.32.42/sound/aoa/codecs/onyx.h 2011-03-27 14:31:47.000000000 -0400
71362 +++ linux-2.6.32.42/sound/aoa/codecs/onyx.h 2011-04-17 15:56:46.000000000 -0400
71363 @@ -11,6 +11,7 @@
71364 #include <linux/i2c.h>
71365 #include <asm/pmac_low_i2c.h>
71366 #include <asm/prom.h>
71367 +#include <asm/local.h>
71368
71369 /* PCM3052 register definitions */
71370
71371 diff -urNp linux-2.6.32.42/sound/drivers/mts64.c linux-2.6.32.42/sound/drivers/mts64.c
71372 --- linux-2.6.32.42/sound/drivers/mts64.c 2011-03-27 14:31:47.000000000 -0400
71373 +++ linux-2.6.32.42/sound/drivers/mts64.c 2011-04-17 15:56:46.000000000 -0400
71374 @@ -27,6 +27,7 @@
71375 #include <sound/initval.h>
71376 #include <sound/rawmidi.h>
71377 #include <sound/control.h>
71378 +#include <asm/local.h>
71379
71380 #define CARD_NAME "Miditerminal 4140"
71381 #define DRIVER_NAME "MTS64"
71382 @@ -65,7 +66,7 @@ struct mts64 {
71383 struct pardevice *pardev;
71384 int pardev_claimed;
71385
71386 - int open_count;
71387 + local_t open_count;
71388 int current_midi_output_port;
71389 int current_midi_input_port;
71390 u8 mode[MTS64_NUM_INPUT_PORTS];
71391 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct
71392 {
71393 struct mts64 *mts = substream->rmidi->private_data;
71394
71395 - if (mts->open_count == 0) {
71396 + if (local_read(&mts->open_count) == 0) {
71397 /* We don't need a spinlock here, because this is just called
71398 if the device has not been opened before.
71399 So there aren't any IRQs from the device */
71400 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct
71401
71402 msleep(50);
71403 }
71404 - ++(mts->open_count);
71405 + local_inc(&mts->open_count);
71406
71407 return 0;
71408 }
71409 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struc
71410 struct mts64 *mts = substream->rmidi->private_data;
71411 unsigned long flags;
71412
71413 - --(mts->open_count);
71414 - if (mts->open_count == 0) {
71415 + if (local_dec_return(&mts->open_count) == 0) {
71416 /* We need the spinlock_irqsave here because we can still
71417 have IRQs at this point */
71418 spin_lock_irqsave(&mts->lock, flags);
71419 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struc
71420
71421 msleep(500);
71422
71423 - } else if (mts->open_count < 0)
71424 - mts->open_count = 0;
71425 + } else if (local_read(&mts->open_count) < 0)
71426 + local_set(&mts->open_count, 0);
71427
71428 return 0;
71429 }
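Editor's note: the onyx and mts64 hunks above convert a plain int open_count into a local_t and use local_inc/local_dec_return style operations to decide when the last opener has gone away. local_t is kernel-only; the sketch below uses C11 atomics purely to illustrate the inc-on-open / dec-and-test-on-close shape of the pattern:

/* C11 atomics standing in for local_t, for illustration only. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long open_count;

static void device_open(void)
{
	atomic_fetch_add(&open_count, 1);
}

static int device_close(void)
{
	/* like local_dec_and_test(): true when this was the last opener */
	return atomic_fetch_sub(&open_count, 1) == 1;
}

int main(void)
{
	device_open();
	device_open();
	printf("last close? %d\n", device_close());	/* 0 */
	printf("last close? %d\n", device_close());	/* 1 */
	return 0;
}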
71430 diff -urNp linux-2.6.32.42/sound/drivers/portman2x4.c linux-2.6.32.42/sound/drivers/portman2x4.c
71431 --- linux-2.6.32.42/sound/drivers/portman2x4.c 2011-03-27 14:31:47.000000000 -0400
71432 +++ linux-2.6.32.42/sound/drivers/portman2x4.c 2011-04-17 15:56:46.000000000 -0400
71433 @@ -46,6 +46,7 @@
71434 #include <sound/initval.h>
71435 #include <sound/rawmidi.h>
71436 #include <sound/control.h>
71437 +#include <asm/local.h>
71438
71439 #define CARD_NAME "Portman 2x4"
71440 #define DRIVER_NAME "portman"
71441 @@ -83,7 +84,7 @@ struct portman {
71442 struct pardevice *pardev;
71443 int pardev_claimed;
71444
71445 - int open_count;
71446 + local_t open_count;
71447 int mode[PORTMAN_NUM_INPUT_PORTS];
71448 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
71449 };
71450 diff -urNp linux-2.6.32.42/sound/oss/sb_audio.c linux-2.6.32.42/sound/oss/sb_audio.c
71451 --- linux-2.6.32.42/sound/oss/sb_audio.c 2011-03-27 14:31:47.000000000 -0400
71452 +++ linux-2.6.32.42/sound/oss/sb_audio.c 2011-04-17 15:56:46.000000000 -0400
71453 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
71454 buf16 = (signed short *)(localbuf + localoffs);
71455 while (c)
71456 {
71457 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
71458 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
71459 if (copy_from_user(lbuf8,
71460 userbuf+useroffs + p,
71461 locallen))
71462 diff -urNp linux-2.6.32.42/sound/oss/swarm_cs4297a.c linux-2.6.32.42/sound/oss/swarm_cs4297a.c
71463 --- linux-2.6.32.42/sound/oss/swarm_cs4297a.c 2011-03-27 14:31:47.000000000 -0400
71464 +++ linux-2.6.32.42/sound/oss/swarm_cs4297a.c 2011-04-17 15:56:46.000000000 -0400
71465 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
71466 {
71467 struct cs4297a_state *s;
71468 u32 pwr, id;
71469 - mm_segment_t fs;
71470 int rval;
71471 #ifndef CONFIG_BCM_CS4297A_CSWARM
71472 u64 cfg;
71473 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
71474 if (!rval) {
71475 char *sb1250_duart_present;
71476
71477 +#if 0
71478 + mm_segment_t fs;
71479 fs = get_fs();
71480 set_fs(KERNEL_DS);
71481 -#if 0
71482 val = SOUND_MASK_LINE;
71483 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
71484 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
71485 val = initvol[i].vol;
71486 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
71487 }
71488 + set_fs(fs);
71489 // cs4297a_write_ac97(s, 0x18, 0x0808);
71490 #else
71491 // cs4297a_write_ac97(s, 0x5e, 0x180);
71492 cs4297a_write_ac97(s, 0x02, 0x0808);
71493 cs4297a_write_ac97(s, 0x18, 0x0808);
71494 #endif
71495 - set_fs(fs);
71496
71497 list_add(&s->list, &cs4297a_devs);
71498
71499 diff -urNp linux-2.6.32.42/sound/pci/ac97/ac97_codec.c linux-2.6.32.42/sound/pci/ac97/ac97_codec.c
71500 --- linux-2.6.32.42/sound/pci/ac97/ac97_codec.c 2011-03-27 14:31:47.000000000 -0400
71501 +++ linux-2.6.32.42/sound/pci/ac97/ac97_codec.c 2011-04-17 15:56:46.000000000 -0400
71502 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struc
71503 }
71504
71505 /* build_ops to do nothing */
71506 -static struct snd_ac97_build_ops null_build_ops;
71507 +static const struct snd_ac97_build_ops null_build_ops;
71508
71509 #ifdef CONFIG_SND_AC97_POWER_SAVE
71510 static void do_update_power(struct work_struct *work)
71511 diff -urNp linux-2.6.32.42/sound/pci/ac97/ac97_patch.c linux-2.6.32.42/sound/pci/ac97/ac97_patch.c
71512 --- linux-2.6.32.42/sound/pci/ac97/ac97_patch.c 2011-03-27 14:31:47.000000000 -0400
71513 +++ linux-2.6.32.42/sound/pci/ac97/ac97_patch.c 2011-04-23 12:56:12.000000000 -0400
71514 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spd
71515 return 0;
71516 }
71517
71518 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
71519 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
71520 .build_spdif = patch_yamaha_ymf743_build_spdif,
71521 .build_3d = patch_yamaha_ymf7x3_3d,
71522 };
71523 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdi
71524 return 0;
71525 }
71526
71527 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
71528 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
71529 .build_3d = patch_yamaha_ymf7x3_3d,
71530 .build_post_spdif = patch_yamaha_ymf753_post_spdif
71531 };
71532 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific
71533 return 0;
71534 }
71535
71536 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
71537 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
71538 .build_specific = patch_wolfson_wm9703_specific,
71539 };
71540
71541 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific
71542 return 0;
71543 }
71544
71545 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
71546 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
71547 .build_specific = patch_wolfson_wm9704_specific,
71548 };
71549
71550 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific
71551 return 0;
71552 }
71553
71554 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
71555 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
71556 .build_specific = patch_wolfson_wm9705_specific,
71557 };
71558
71559 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific
71560 return 0;
71561 }
71562
71563 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
71564 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
71565 .build_specific = patch_wolfson_wm9711_specific,
71566 };
71567
71568 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume
71569 }
71570 #endif
71571
71572 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
71573 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
71574 .build_specific = patch_wolfson_wm9713_specific,
71575 .build_3d = patch_wolfson_wm9713_3d,
71576 #ifdef CONFIG_PM
71577 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_speci
71578 return 0;
71579 }
71580
71581 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
71582 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
71583 .build_3d = patch_sigmatel_stac9700_3d,
71584 .build_specific = patch_sigmatel_stac97xx_specific
71585 };
71586 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_speci
71587 return patch_sigmatel_stac97xx_specific(ac97);
71588 }
71589
71590 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
71591 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
71592 .build_3d = patch_sigmatel_stac9708_3d,
71593 .build_specific = patch_sigmatel_stac9708_specific
71594 };
71595 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_speci
71596 return 0;
71597 }
71598
71599 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
71600 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
71601 .build_3d = patch_sigmatel_stac9700_3d,
71602 .build_specific = patch_sigmatel_stac9758_specific
71603 };
71604 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(stru
71605 return 0;
71606 }
71607
71608 -static struct snd_ac97_build_ops patch_cirrus_ops = {
71609 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
71610 .build_spdif = patch_cirrus_build_spdif
71611 };
71612
71613 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(st
71614 return 0;
71615 }
71616
71617 -static struct snd_ac97_build_ops patch_conexant_ops = {
71618 +static const struct snd_ac97_build_ops patch_conexant_ops = {
71619 .build_spdif = patch_conexant_build_spdif
71620 };
71621
71622 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct
71623 }
71624 }
71625
71626 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
71627 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
71628 #ifdef CONFIG_PM
71629 .resume = ad18xx_resume
71630 #endif
71631 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct
71632 return 0;
71633 }
71634
71635 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
71636 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
71637 .build_specific = &patch_ad1885_specific,
71638 #ifdef CONFIG_PM
71639 .resume = ad18xx_resume
71640 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct
71641 return 0;
71642 }
71643
71644 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
71645 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
71646 .build_specific = &patch_ad1886_specific,
71647 #ifdef CONFIG_PM
71648 .resume = ad18xx_resume
71649 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct
71650 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
71651 }
71652
71653 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
71654 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
71655 .build_post_spdif = patch_ad198x_post_spdif,
71656 .build_specific = patch_ad1981a_specific,
71657 #ifdef CONFIG_PM
71658 @@ -1951,7 +1951,7 @@ static int patch_ad1981b_specific(struct
71659 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
71660 }
71661
71662 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
71663 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
71664 .build_post_spdif = patch_ad198x_post_spdif,
71665 .build_specific = patch_ad1981b_specific,
71666 #ifdef CONFIG_PM
71667 @@ -2090,7 +2090,7 @@ static int patch_ad1888_specific(struct
71668 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
71669 }
71670
71671 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
71672 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
71673 .build_post_spdif = patch_ad198x_post_spdif,
71674 .build_specific = patch_ad1888_specific,
71675 #ifdef CONFIG_PM
71676 @@ -2139,7 +2139,7 @@ static int patch_ad1980_specific(struct
71677 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
71678 }
71679
71680 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
71681 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
71682 .build_post_spdif = patch_ad198x_post_spdif,
71683 .build_specific = patch_ad1980_specific,
71684 #ifdef CONFIG_PM
71685 @@ -2254,7 +2254,7 @@ static int patch_ad1985_specific(struct
71686 ARRAY_SIZE(snd_ac97_ad1985_controls));
71687 }
71688
71689 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
71690 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
71691 .build_post_spdif = patch_ad198x_post_spdif,
71692 .build_specific = patch_ad1985_specific,
71693 #ifdef CONFIG_PM
71694 @@ -2546,7 +2546,7 @@ static int patch_ad1986_specific(struct
71695 ARRAY_SIZE(snd_ac97_ad1985_controls));
71696 }
71697
71698 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
71699 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
71700 .build_post_spdif = patch_ad198x_post_spdif,
71701 .build_specific = patch_ad1986_specific,
71702 #ifdef CONFIG_PM
71703 @@ -2651,7 +2651,7 @@ static int patch_alc650_specific(struct
71704 return 0;
71705 }
71706
71707 -static struct snd_ac97_build_ops patch_alc650_ops = {
71708 +static const struct snd_ac97_build_ops patch_alc650_ops = {
71709 .build_specific = patch_alc650_specific,
71710 .update_jacks = alc650_update_jacks
71711 };
71712 @@ -2803,7 +2803,7 @@ static int patch_alc655_specific(struct
71713 return 0;
71714 }
71715
71716 -static struct snd_ac97_build_ops patch_alc655_ops = {
71717 +static const struct snd_ac97_build_ops patch_alc655_ops = {
71718 .build_specific = patch_alc655_specific,
71719 .update_jacks = alc655_update_jacks
71720 };
71721 @@ -2915,7 +2915,7 @@ static int patch_alc850_specific(struct
71722 return 0;
71723 }
71724
71725 -static struct snd_ac97_build_ops patch_alc850_ops = {
71726 +static const struct snd_ac97_build_ops patch_alc850_ops = {
71727 .build_specific = patch_alc850_specific,
71728 .update_jacks = alc850_update_jacks
71729 };
71730 @@ -2977,7 +2977,7 @@ static int patch_cm9738_specific(struct
71731 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
71732 }
71733
71734 -static struct snd_ac97_build_ops patch_cm9738_ops = {
71735 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
71736 .build_specific = patch_cm9738_specific,
71737 .update_jacks = cm9738_update_jacks
71738 };
71739 @@ -3068,7 +3068,7 @@ static int patch_cm9739_post_spdif(struc
71740 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
71741 }
71742
71743 -static struct snd_ac97_build_ops patch_cm9739_ops = {
71744 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
71745 .build_specific = patch_cm9739_specific,
71746 .build_post_spdif = patch_cm9739_post_spdif,
71747 .update_jacks = cm9739_update_jacks
71748 @@ -3242,7 +3242,7 @@ static int patch_cm9761_specific(struct
71749 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
71750 }
71751
71752 -static struct snd_ac97_build_ops patch_cm9761_ops = {
71753 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
71754 .build_specific = patch_cm9761_specific,
71755 .build_post_spdif = patch_cm9761_post_spdif,
71756 .update_jacks = cm9761_update_jacks
71757 @@ -3338,7 +3338,7 @@ static int patch_cm9780_specific(struct
71758 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
71759 }
71760
71761 -static struct snd_ac97_build_ops patch_cm9780_ops = {
71762 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
71763 .build_specific = patch_cm9780_specific,
71764 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
71765 };
71766 @@ -3458,7 +3458,7 @@ static int patch_vt1616_specific(struct
71767 return 0;
71768 }
71769
71770 -static struct snd_ac97_build_ops patch_vt1616_ops = {
71771 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
71772 .build_specific = patch_vt1616_specific
71773 };
71774
71775 @@ -3812,7 +3812,7 @@ static int patch_it2646_specific(struct
71776 return 0;
71777 }
71778
71779 -static struct snd_ac97_build_ops patch_it2646_ops = {
71780 +static const struct snd_ac97_build_ops patch_it2646_ops = {
71781 .build_specific = patch_it2646_specific,
71782 .update_jacks = it2646_update_jacks
71783 };
71784 @@ -3846,7 +3846,7 @@ static int patch_si3036_specific(struct
71785 return 0;
71786 }
71787
71788 -static struct snd_ac97_build_ops patch_si3036_ops = {
71789 +static const struct snd_ac97_build_ops patch_si3036_ops = {
71790 .build_specific = patch_si3036_specific,
71791 };
71792
71793 @@ -3913,7 +3913,7 @@ static int patch_ucb1400_specific(struct
71794 return 0;
71795 }
71796
71797 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
71798 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
71799 .build_specific = patch_ucb1400_specific,
71800 };
71801
71802 diff -urNp linux-2.6.32.42/sound/pci/hda/patch_intelhdmi.c linux-2.6.32.42/sound/pci/hda/patch_intelhdmi.c
71803 --- linux-2.6.32.42/sound/pci/hda/patch_intelhdmi.c 2011-03-27 14:31:47.000000000 -0400
71804 +++ linux-2.6.32.42/sound/pci/hda/patch_intelhdmi.c 2011-04-17 15:56:46.000000000 -0400
71805 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(str
71806 cp_ready);
71807
71808 /* TODO */
71809 - if (cp_state)
71810 - ;
71811 - if (cp_ready)
71812 - ;
71813 + if (cp_state) {
71814 + }
71815 + if (cp_ready) {
71816 + }
71817 }
71818
71819
71820 diff -urNp linux-2.6.32.42/sound/pci/intel8x0m.c linux-2.6.32.42/sound/pci/intel8x0m.c
71821 --- linux-2.6.32.42/sound/pci/intel8x0m.c 2011-03-27 14:31:47.000000000 -0400
71822 +++ linux-2.6.32.42/sound/pci/intel8x0m.c 2011-04-23 12:56:12.000000000 -0400
71823 @@ -1264,7 +1264,7 @@ static struct shortname_table {
71824 { 0x5455, "ALi M5455" },
71825 { 0x746d, "AMD AMD8111" },
71826 #endif
71827 - { 0 },
71828 + { 0, },
71829 };
71830
71831 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
71832 diff -urNp linux-2.6.32.42/sound/pci/ymfpci/ymfpci_main.c linux-2.6.32.42/sound/pci/ymfpci/ymfpci_main.c
71833 --- linux-2.6.32.42/sound/pci/ymfpci/ymfpci_main.c 2011-03-27 14:31:47.000000000 -0400
71834 +++ linux-2.6.32.42/sound/pci/ymfpci/ymfpci_main.c 2011-05-04 17:56:28.000000000 -0400
71835 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
71836 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
71837 break;
71838 }
71839 - if (atomic_read(&chip->interrupt_sleep_count)) {
71840 - atomic_set(&chip->interrupt_sleep_count, 0);
71841 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
71842 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
71843 wake_up(&chip->interrupt_sleep);
71844 }
71845 __end:
71846 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
71847 continue;
71848 init_waitqueue_entry(&wait, current);
71849 add_wait_queue(&chip->interrupt_sleep, &wait);
71850 - atomic_inc(&chip->interrupt_sleep_count);
71851 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
71852 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
71853 remove_wait_queue(&chip->interrupt_sleep, &wait);
71854 }
71855 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
71856 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
71857 spin_unlock(&chip->reg_lock);
71858
71859 - if (atomic_read(&chip->interrupt_sleep_count)) {
71860 - atomic_set(&chip->interrupt_sleep_count, 0);
71861 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
71862 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
71863 wake_up(&chip->interrupt_sleep);
71864 }
71865 }
71866 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct s
71867 spin_lock_init(&chip->reg_lock);
71868 spin_lock_init(&chip->voice_lock);
71869 init_waitqueue_head(&chip->interrupt_sleep);
71870 - atomic_set(&chip->interrupt_sleep_count, 0);
71871 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
71872 chip->card = card;
71873 chip->pci = pci;
71874 chip->irq = -1;
71875 diff -urNp linux-2.6.32.42/tools/gcc/Makefile linux-2.6.32.42/tools/gcc/Makefile
71876 --- linux-2.6.32.42/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
71877 +++ linux-2.6.32.42/tools/gcc/Makefile 2011-06-04 20:52:13.000000000 -0400
71878 @@ -0,0 +1,11 @@
71879 +#CC := gcc
71880 +#PLUGIN_SOURCE_FILES := pax_plugin.c
71881 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
71882 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
71883 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
71884 +
71885 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
71886 +
71887 +hostlibs-y := pax_plugin.so
71888 +always := $(hostlibs-y)
71889 +pax_plugin-objs := pax_plugin.o
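Editor's note: the new tools/gcc files add a host-built gcc plugin; per the Makefile it is compiled against the headers reported by '$(HOSTCC) -print-file-name=plugin', and the plugin_info help further down advertises a track-lowest-sp=nn argument (with gcc's usual plugin syntax that would be passed as -fplugin-arg-pax_plugin-track-lowest-sp=nn, an assumption based on gcc conventions rather than anything stated in the patch). Conceptually, the GIMPLE pass inserts a call to pax_track_stack() into functions whose frames are large enough; a hand-written sketch of that effect, with a stub standing in for the kernel's real pax_track_stack():

/* Sketch of the plugin's effect on a function with a frame of at least
 * track-lowest-sp bytes.  The stub exists only so the sketch builds alone. */
#include <stdio.h>

static void pax_track_stack(void)	/* stub; the kernel provides the real one */
{
	int marker;

	printf("approximate stack pointer: %p\n", (void *)&marker);
}

static void big_frame_example(void)
{
	char buf[512];

	pax_track_stack();		/* inserted by the plugin, not by hand */
	buf[0] = 0;
	(void)buf[0];
}

int main(void)
{
	big_frame_example();
	return 0;
}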
71890 diff -urNp linux-2.6.32.42/tools/gcc/pax_plugin.c linux-2.6.32.42/tools/gcc/pax_plugin.c
71891 --- linux-2.6.32.42/tools/gcc/pax_plugin.c 1969-12-31 19:00:00.000000000 -0500
71892 +++ linux-2.6.32.42/tools/gcc/pax_plugin.c 2011-06-04 20:52:13.000000000 -0400
71893 @@ -0,0 +1,242 @@
71894 +/*
71895 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
71896 + * Licensed under the GPL v2
71897 + *
71898 + * Note: the choice of the license means that the compilation process is
71899 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
71900 + * but for the kernel it doesn't matter since it doesn't link against
71901 + * any of the gcc libraries
71902 + *
71903 + * gcc plugin to help implement various PaX features
71904 + *
71905 + * - track lowest stack pointer
71906 + *
71907 + * TODO:
71908 + * - initialize all local variables
71909 + *
71910 + * BUGS:
71911 + */
71912 +#include "gcc-plugin.h"
71913 +#include "plugin-version.h"
71914 +#include "config.h"
71915 +#include "system.h"
71916 +#include "coretypes.h"
71917 +#include "tm.h"
71918 +#include "toplev.h"
71919 +#include "basic-block.h"
71920 +#include "gimple.h"
71921 +//#include "expr.h" where are you...
71922 +#include "diagnostic.h"
71923 +#include "rtl.h"
71924 +#include "emit-rtl.h"
71925 +#include "function.h"
71926 +#include "tree.h"
71927 +#include "tree-pass.h"
71928 +#include "intl.h"
71929 +
71930 +int plugin_is_GPL_compatible;
71931 +
71932 +static int track_frame_size = -1;
71933 +static const char track_function[] = "pax_track_stack";
71934 +static bool init_locals;
71935 +
71936 +static struct plugin_info pax_plugin_info = {
71937 + .version = "201106030000",
71938 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
71939 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
71940 +};
71941 +
71942 +static bool gate_pax_track_stack(void);
71943 +static unsigned int execute_pax_tree_instrument(void);
71944 +static unsigned int execute_pax_final(void);
71945 +
71946 +static struct gimple_opt_pass pax_tree_instrument_pass = {
71947 + .pass = {
71948 + .type = GIMPLE_PASS,
71949 + .name = "pax_tree_instrument",
71950 + .gate = gate_pax_track_stack,
71951 + .execute = execute_pax_tree_instrument,
71952 + .sub = NULL,
71953 + .next = NULL,
71954 + .static_pass_number = 0,
71955 + .tv_id = TV_NONE,
71956 + .properties_required = PROP_gimple_leh | PROP_cfg,
71957 + .properties_provided = 0,
71958 + .properties_destroyed = 0,
71959 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
71960 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
71961 + }
71962 +};
71963 +
71964 +static struct rtl_opt_pass pax_final_rtl_opt_pass = {
71965 + .pass = {
71966 + .type = RTL_PASS,
71967 + .name = "pax_final",
71968 + .gate = gate_pax_track_stack,
71969 + .execute = execute_pax_final,
71970 + .sub = NULL,
71971 + .next = NULL,
71972 + .static_pass_number = 0,
71973 + .tv_id = TV_NONE,
71974 + .properties_required = 0,
71975 + .properties_provided = 0,
71976 + .properties_destroyed = 0,
71977 + .todo_flags_start = 0,
71978 + .todo_flags_finish = 0
71979 + }
71980 +};
71981 +
71982 +static bool gate_pax_track_stack(void)
71983 +{
71984 + return track_frame_size >= 0;
71985 +}
71986 +
71987 +static void pax_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
71988 +{
71989 + gimple call;
71990 + tree decl, type;
71991 +
71992 + // insert call to void pax_track_stack(void)
71993 + type = build_function_type_list(void_type_node, NULL_TREE);
71994 + decl = build_fn_decl(track_function, type);
71995 + DECL_ASSEMBLER_NAME(decl); // for LTO
71996 + call = gimple_build_call(decl, 0);
71997 + if (before)
71998 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
71999 + else
72000 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
72001 +}
72002 +
72003 +static unsigned int execute_pax_tree_instrument(void)
72004 +{
72005 + basic_block bb;
72006 + gimple_stmt_iterator gsi;
72007 +
72008 + // 1. loop through BBs and GIMPLE statements
72009 + FOR_EACH_BB(bb) {
72010 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
72011 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
72012 + tree decl;
72013 + gimple stmt = gsi_stmt(gsi);
72014 +
72015 + if (!is_gimple_call(stmt))
72016 + continue;
72017 + decl = gimple_call_fndecl(stmt);
72018 + if (!decl)
72019 + continue;
72020 + if (TREE_CODE(decl) != FUNCTION_DECL)
72021 + continue;
72022 + if (!DECL_BUILT_IN(decl))
72023 + continue;
72024 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
72025 + continue;
72026 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
72027 + continue;
72028 +
72029 + // 2. insert track call after each __builtin_alloca call
72030 + pax_add_instrumentation(&gsi, false);
72031 +// print_node(stderr, "pax", decl, 4);
72032 + }
72033 + }
72034 +
72035 + // 3. insert track call at the beginning
72036 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
72037 + gsi = gsi_start_bb(bb);
72038 + pax_add_instrumentation(&gsi, true);
72039 +
72040 + return 0;
72041 +}
72042 +
72043 +static unsigned int execute_pax_final(void)
72044 +{
72045 + rtx insn;
72046 +
72047 + if (cfun->calls_alloca)
72048 + return 0;
72049 +
72050 + // 1. find pax_track_stack calls
72051 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
72052 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
72053 + rtx body;
72054 +
72055 + if (!CALL_P(insn))
72056 + continue;
72057 + body = PATTERN(insn);
72058 + if (GET_CODE(body) != CALL)
72059 + continue;
72060 + body = XEXP(body, 0);
72061 + if (GET_CODE(body) != MEM)
72062 + continue;
72063 + body = XEXP(body, 0);
72064 + if (GET_CODE(body) != SYMBOL_REF)
72065 + continue;
72066 + if (strcmp(XSTR(body, 0), track_function))
72067 + continue;
72068 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
72069 + // 2. delete call if function frame is not big enough
72070 + if (get_frame_size() >= track_frame_size)
72071 + continue;
72072 + delete_insn_and_edges(insn);
72073 + }
72074 +
72075 +// print_simple_rtl(stderr, get_insns());
72076 +// print_rtl(stderr, get_insns());
72077 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
72078 +
72079 + return 0;
72080 +}
72081 +
72082 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
72083 +{
72084 + const char * const plugin_name = plugin_info->base_name;
72085 + const int argc = plugin_info->argc;
72086 + const struct plugin_argument * const argv = plugin_info->argv;
72087 + int i;
72088 + struct register_pass_info pax_tree_instrument_pass_info = {
72089 + .pass = &pax_tree_instrument_pass.pass,
72090 +// .reference_pass_name = "tree_profile",
72091 + .reference_pass_name = "optimized",
72092 + .ref_pass_instance_number = 0,
72093 + .pos_op = PASS_POS_INSERT_AFTER
72094 + };
72095 + struct register_pass_info pax_final_pass_info = {
72096 + .pass = &pax_final_rtl_opt_pass.pass,
72097 + .reference_pass_name = "final",
72098 + .ref_pass_instance_number = 0,
72099 + .pos_op = PASS_POS_INSERT_BEFORE
72100 + };
72101 +
72102 + if (!plugin_default_version_check(version, &gcc_version)) {
72103 + error(G_("incompatible gcc/plugin versions"));
72104 + return 1;
72105 + }
72106 +
72107 + register_callback(plugin_name, PLUGIN_INFO, NULL, &pax_plugin_info);
72108 +
72109 + for (i = 0; i < argc; ++i) {
72110 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
72111 + if (!argv[i].value) {
72112 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
72113 + continue;
72114 + }
72115 + track_frame_size = atoi(argv[i].value);
72116 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
72117 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
72118 + continue;
72119 + }
72120 + if (!strcmp(argv[i].key, "initialize-locals")) {
72121 + if (argv[i].value) {
72122 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
72123 + continue;
72124 + }
72125 + init_locals = true;
72126 + continue;
72127 + }
72128 +		error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
72129 + }
72130 +
72131 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_tree_instrument_pass_info);
72132 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_final_pass_info);
72133 +
72134 + return 0;
72135 +}
72136 Binary files linux-2.6.32.42/tools/gcc/pax_plugin.so and linux-2.6.32.42/tools/gcc/pax_plugin.so differ
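
The plugin implements the stack-pointer tracking announced in its header comment. gate_pax_track_stack() enables both passes only when a non-negative track-lowest-sp value was supplied; the GIMPLE pass then plants a call to a void pax_track_stack(void) helper at the start of every function and after every __builtin_alloca call, and the RTL pass, registered to run just before final, deletes those calls again in functions that do not call alloca and whose finished frame turns out to be smaller than the threshold. (The initialize-locals argument is parsed but, per the TODO above, not yet acted upon.) A rough sketch of what an instrumented function ends up doing, assuming the kernel supplies the pax_track_stack() helper elsewhere in this patch; the stub body below exists only to make the illustration self-contained:

/* Illustration only: the effective shape of a function after the GIMPLE pass
 * has run and the RTL pass has kept the calls (kept here because the function
 * calls alloca, so execute_pax_final() skips it entirely). */
void pax_track_stack(void)
{
	/* stub for illustration; the kernel's real helper records the lowest
	 * stack pointer observed for the current task */
}

void instrumented_example(unsigned int n)
{
	char frame_buffer[512];		/* counts toward get_frame_size() */
	char *dynamic;

	pax_track_stack();		/* inserted at the top of the first basic block */

	dynamic = __builtin_alloca(n);
	pax_track_stack();		/* inserted after each __builtin_alloca call */

	/* ... original function body ... */
	(void)frame_buffer;
	(void)dynamic;
}
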
72137 diff -urNp linux-2.6.32.42/usr/gen_init_cpio.c linux-2.6.32.42/usr/gen_init_cpio.c
72138 --- linux-2.6.32.42/usr/gen_init_cpio.c 2011-03-27 14:31:47.000000000 -0400
72139 +++ linux-2.6.32.42/usr/gen_init_cpio.c 2011-04-17 15:56:46.000000000 -0400
72140 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name,
72141 int retval;
72142 int rc = -1;
72143 int namesize;
72144 - int i;
72145 + unsigned int i;
72146
72147 mode |= S_IFREG;
72148
72149 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_
72150 *env_var = *expanded = '\0';
72151 strncat(env_var, start + 2, end - start - 2);
72152 strncat(expanded, new_location, start - new_location);
72153 - strncat(expanded, getenv(env_var), PATH_MAX);
72154 - strncat(expanded, end + 1, PATH_MAX);
72155 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
72156 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
72157 strncpy(new_location, expanded, PATH_MAX);
72158 + new_location[PATH_MAX] = 0;
72159 } else
72160 break;
72161 }
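
The gen_init_cpio.c hunk fixes a classic strncat() misuse: the third argument bounds how many bytes may be appended, not the total size of the destination, so appending up to PATH_MAX bytes into a buffer that already holds text can run past its end. The fix recomputes the remaining room before each append and explicitly NUL-terminates new_location after the strncpy() (strncpy() does not terminate when the source fills the buffer). A small stand-alone illustration of the corrected pattern; the buffer and helper names here are made up for the example:

#include <limits.h>
#include <stdio.h>
#include <string.h>

/* Append src to dst, where dst is a buffer of PATH_MAX + 1 bytes that already
 * holds a NUL-terminated string: the strncat() bound must be the space left,
 * not the buffer size, exactly as in the hunk above. */
static void append_bounded(char dst[PATH_MAX + 1], const char *src)
{
	strncat(dst, src, PATH_MAX - strlen(dst));
}

int main(void)
{
	char expanded[PATH_MAX + 1] = "";

	append_bounded(expanded, "/usr/src/");
	append_bounded(expanded, "linux-2.6.32.42");
	puts(expanded);
	return 0;
}
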
72162 diff -urNp linux-2.6.32.42/virt/kvm/kvm_main.c linux-2.6.32.42/virt/kvm/kvm_main.c
72163 --- linux-2.6.32.42/virt/kvm/kvm_main.c 2011-03-27 14:31:47.000000000 -0400
72164 +++ linux-2.6.32.42/virt/kvm/kvm_main.c 2011-04-23 21:41:37.000000000 -0400
72165 @@ -1748,6 +1748,7 @@ static int kvm_vcpu_release(struct inode
72166 return 0;
72167 }
72168
72169 +/* cannot be const */
72170 static struct file_operations kvm_vcpu_fops = {
72171 .release = kvm_vcpu_release,
72172 .unlocked_ioctl = kvm_vcpu_ioctl,
72173 @@ -2344,6 +2345,7 @@ static int kvm_vm_mmap(struct file *file
72174 return 0;
72175 }
72176
72177 +/* cannot be const */
72178 static struct file_operations kvm_vm_fops = {
72179 .release = kvm_vm_release,
72180 .unlocked_ioctl = kvm_vm_ioctl,
72181 @@ -2431,6 +2433,7 @@ out:
72182 return r;
72183 }
72184
72185 +/* cannot be const */
72186 static struct file_operations kvm_chardev_ops = {
72187 .unlocked_ioctl = kvm_dev_ioctl,
72188 .compat_ioctl = kvm_dev_ioctl,
72189 @@ -2494,7 +2497,7 @@ asmlinkage void kvm_handle_fault_on_rebo
72190 if (kvm_rebooting)
72191 /* spin while reset goes on */
72192 while (true)
72193 - ;
72194 + cpu_relax();
72195 /* Fault while not rebooting. We want the trace. */
72196 BUG();
72197 }
72198 @@ -2714,7 +2717,7 @@ static void kvm_sched_out(struct preempt
72199 kvm_arch_vcpu_put(vcpu);
72200 }
72201
72202 -int kvm_init(void *opaque, unsigned int vcpu_size,
72203 +int kvm_init(const void *opaque, unsigned int vcpu_size,
72204 struct module *module)
72205 {
72206 int r;
72207 @@ -2767,7 +2770,7 @@ int kvm_init(void *opaque, unsigned int
72208 /* A kmem cache lets us meet the alignment requirements of fx_save. */
72209 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
72210 __alignof__(struct kvm_vcpu),
72211 - 0, NULL);
72212 + SLAB_USERCOPY, NULL);
72213 if (!kvm_vcpu_cache) {
72214 r = -ENOMEM;
72215 goto out_free_5;
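
The kvm_main.c changes are typical of the rest of the patch. The three file_operations structures are annotated /* cannot be const */ because they are written at run time (kvm_init() presumably fills in their .owner field from its module argument in this kernel version), so grsecurity's constification of ops structures has to leave them writable. The reboot-fault handler's busy-wait gains cpu_relax(), a pause hint, instead of a bare empty loop; kvm_init()'s opaque pointer parameter becomes const; and the vcpu slab cache is created with the PaX-specific SLAB_USERCOPY flag so that the USERCOPY feature will permit user-space copies into and out of objects from that cache. A hypothetical illustration of what that whitelist allows; the function, buffer and size below are made up and are not an actual KVM code path:

/* Hypothetical illustration: under PAX_USERCOPY, copying a slab object to or
 * from user space is only allowed when the object's cache carries
 * SLAB_USERCOPY, as kvm_vcpu_cache does after the hunk above. */
static long example_copy_vcpu_state(struct kvm_vcpu *vcpu,
				    void __user *user_buf, size_t size)
{
	/* vcpu was allocated from kvm_vcpu_cache, so the USERCOPY heap-object
	 * check inside copy_to_user() accepts this copy */
	if (copy_to_user(user_buf, vcpu, size))
		return -EFAULT;
	return 0;
}
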