grsecurity-2.2.2-2.6.32.42-201106251302.patch
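
This patch (grsecurity 2.2.2 for Linux 2.6.32.42) carries the PaX/grsecurity architecture changes for alpha, arm, avr32, blackfin, frv, ia64, m32r and mips shown below: PAX_ELF_ET_DYN_BASE and PAX_DELTA_* ASLR constants, PAGEEXEC fault handling with pax_report_insns() helpers, check_heap_stack_gap() in the unmapped-area searches, const-ification of dma_map_ops/platform_suspend_ops/kgdb_arch structures, and "(long)n < 0" length guards in the user-copy helpers. The fragment below is an illustrative userspace sketch, not part of the patch; it only models why that sign check on the copy length is useful (a negative int length, once widened to unsigned long, becomes a huge size that access_ok() range checks alone may not reject). The mock_copy_from_user() name and all scaffolding around it are made up for the example.

/*
 * Minimal userspace model of the guard added to copy_from_user()/
 * copy_to_user() in this patch. Not kernel code.
 */
#include <stdio.h>

static unsigned long mock_copy_from_user(void *to, const void *from, unsigned long n)
{
	(void)to;
	(void)from;

	if ((long)n < 0)	/* same guard the patch adds: reject sizes > LONG_MAX */
		return n;	/* report "nothing copied" instead of a huge copy */

	/* a real kernel would check access_ok() and call __copy_from_user() here */
	return 0;
}

int main(void)
{
	char buf[16];
	int broken_len = -1;	/* e.g. an unchecked, attacker-influenced length */

	/* -1 becomes 0xffffffffffffffff when passed as unsigned long */
	unsigned long left = mock_copy_from_user(buf, (const void *)0,
						 (unsigned long)broken_len);
	printf("bytes left uncopied: %lu\n", left);
	return 0;
}
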
1 diff -urNp linux-2.6.32.42/arch/alpha/include/asm/elf.h linux-2.6.32.42/arch/alpha/include/asm/elf.h
2 --- linux-2.6.32.42/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3 +++ linux-2.6.32.42/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-2.6.32.42/arch/alpha/include/asm/pgtable.h linux-2.6.32.42/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.32.42/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20 +++ linux-2.6.32.42/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.32.42/arch/alpha/kernel/module.c linux-2.6.32.42/arch/alpha/kernel/module.c
40 --- linux-2.6.32.42/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41 +++ linux-2.6.32.42/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.32.42/arch/alpha/kernel/osf_sys.c linux-2.6.32.42/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.32.42/arch/alpha/kernel/osf_sys.c 2011-03-27 14:31:47.000000000 -0400
53 +++ linux-2.6.32.42/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54 @@ -431,7 +431,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char
55 return -EFAULT;
56
57 len = namelen;
58 - if (namelen > 32)
59 + if (len > 32)
60 len = 32;
61
62 down_read(&uts_sem);
63 @@ -618,7 +618,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, comman
64 down_read(&uts_sem);
65 res = sysinfo_table[offset];
66 len = strlen(res)+1;
67 - if (len > count)
68 + if ((unsigned long)len > (unsigned long)count)
69 len = count;
70 if (copy_to_user(buf, res, len))
71 err = -EFAULT;
72 @@ -673,7 +673,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned
73 return 1;
74
75 case GSI_GET_HWRPB:
76 - if (nbytes < sizeof(*hwrpb))
77 + if (nbytes > sizeof(*hwrpb))
78 return -EINVAL;
79 if (copy_to_user(buffer, hwrpb, nbytes) != 0)
80 return -EFAULT;
81 @@ -1035,6 +1035,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
82 {
83 struct rusage r;
84 long ret, err;
85 + unsigned int status = 0;
86 mm_segment_t old_fs;
87
88 if (!ur)
89 @@ -1043,13 +1044,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
90 old_fs = get_fs();
91
92 set_fs (KERNEL_DS);
93 - ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
94 + ret = sys_wait4(pid, (unsigned int __user *) &status, options,
95 + (struct rusage __user *) &r);
96 set_fs (old_fs);
97
98 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
99 return -EFAULT;
100
101 err = 0;
102 + err |= put_user(status, ustatus);
103 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
104 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
105 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
106 @@ -1169,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
107 /* At this point: (!vma || addr < vma->vm_end). */
108 if (limit - len < addr)
109 return -ENOMEM;
110 - if (!vma || addr + len <= vma->vm_start)
111 + if (check_heap_stack_gap(vma, addr, len))
112 return addr;
113 addr = vma->vm_end;
114 vma = vma->vm_next;
115 @@ -1205,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
116 merely specific addresses, but regions of memory -- perhaps
117 this feature should be incorporated into all ports? */
118
119 +#ifdef CONFIG_PAX_RANDMMAP
120 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
121 +#endif
122 +
123 if (addr) {
124 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
125 if (addr != (unsigned long) -ENOMEM)
126 @@ -1212,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
127 }
128
129 /* Next, try allocating at TASK_UNMAPPED_BASE. */
130 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
131 - len, limit);
132 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
133 +
134 if (addr != (unsigned long) -ENOMEM)
135 return addr;
136
137 diff -urNp linux-2.6.32.42/arch/alpha/mm/fault.c linux-2.6.32.42/arch/alpha/mm/fault.c
138 --- linux-2.6.32.42/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
139 +++ linux-2.6.32.42/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
140 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
141 __reload_thread(pcb);
142 }
143
144 +#ifdef CONFIG_PAX_PAGEEXEC
145 +/*
146 + * PaX: decide what to do with offenders (regs->pc = fault address)
147 + *
148 + * returns 1 when task should be killed
149 + * 2 when patched PLT trampoline was detected
150 + * 3 when unpatched PLT trampoline was detected
151 + */
152 +static int pax_handle_fetch_fault(struct pt_regs *regs)
153 +{
154 +
155 +#ifdef CONFIG_PAX_EMUPLT
156 + int err;
157 +
158 + do { /* PaX: patched PLT emulation #1 */
159 + unsigned int ldah, ldq, jmp;
160 +
161 + err = get_user(ldah, (unsigned int *)regs->pc);
162 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
163 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
164 +
165 + if (err)
166 + break;
167 +
168 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
169 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
170 + jmp == 0x6BFB0000U)
171 + {
172 + unsigned long r27, addr;
173 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
174 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
175 +
176 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
177 + err = get_user(r27, (unsigned long *)addr);
178 + if (err)
179 + break;
180 +
181 + regs->r27 = r27;
182 + regs->pc = r27;
183 + return 2;
184 + }
185 + } while (0);
186 +
187 + do { /* PaX: patched PLT emulation #2 */
188 + unsigned int ldah, lda, br;
189 +
190 + err = get_user(ldah, (unsigned int *)regs->pc);
191 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
192 + err |= get_user(br, (unsigned int *)(regs->pc+8));
193 +
194 + if (err)
195 + break;
196 +
197 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
198 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
199 + (br & 0xFFE00000U) == 0xC3E00000U)
200 + {
201 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
202 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
203 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
204 +
205 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
206 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
207 + return 2;
208 + }
209 + } while (0);
210 +
211 + do { /* PaX: unpatched PLT emulation */
212 + unsigned int br;
213 +
214 + err = get_user(br, (unsigned int *)regs->pc);
215 +
216 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
217 + unsigned int br2, ldq, nop, jmp;
218 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
219 +
220 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
221 + err = get_user(br2, (unsigned int *)addr);
222 + err |= get_user(ldq, (unsigned int *)(addr+4));
223 + err |= get_user(nop, (unsigned int *)(addr+8));
224 + err |= get_user(jmp, (unsigned int *)(addr+12));
225 + err |= get_user(resolver, (unsigned long *)(addr+16));
226 +
227 + if (err)
228 + break;
229 +
230 + if (br2 == 0xC3600000U &&
231 + ldq == 0xA77B000CU &&
232 + nop == 0x47FF041FU &&
233 + jmp == 0x6B7B0000U)
234 + {
235 + regs->r28 = regs->pc+4;
236 + regs->r27 = addr+16;
237 + regs->pc = resolver;
238 + return 3;
239 + }
240 + }
241 + } while (0);
242 +#endif
243 +
244 + return 1;
245 +}
246 +
247 +void pax_report_insns(void *pc, void *sp)
248 +{
249 + unsigned long i;
250 +
251 + printk(KERN_ERR "PAX: bytes at PC: ");
252 + for (i = 0; i < 5; i++) {
253 + unsigned int c;
254 + if (get_user(c, (unsigned int *)pc+i))
255 + printk(KERN_CONT "???????? ");
256 + else
257 + printk(KERN_CONT "%08x ", c);
258 + }
259 + printk("\n");
260 +}
261 +#endif
262
263 /*
264 * This routine handles page faults. It determines the address,
265 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
266 good_area:
267 si_code = SEGV_ACCERR;
268 if (cause < 0) {
269 - if (!(vma->vm_flags & VM_EXEC))
270 + if (!(vma->vm_flags & VM_EXEC)) {
271 +
272 +#ifdef CONFIG_PAX_PAGEEXEC
273 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
274 + goto bad_area;
275 +
276 + up_read(&mm->mmap_sem);
277 + switch (pax_handle_fetch_fault(regs)) {
278 +
279 +#ifdef CONFIG_PAX_EMUPLT
280 + case 2:
281 + case 3:
282 + return;
283 +#endif
284 +
285 + }
286 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
287 + do_group_exit(SIGKILL);
288 +#else
289 goto bad_area;
290 +#endif
291 +
292 + }
293 } else if (!cause) {
294 /* Allow reads even for write-only mappings */
295 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
296 diff -urNp linux-2.6.32.42/arch/arm/include/asm/elf.h linux-2.6.32.42/arch/arm/include/asm/elf.h
297 --- linux-2.6.32.42/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
298 +++ linux-2.6.32.42/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
299 @@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
300 the loader. We need to make sure that it is out of the way of the program
301 that it will "exec", and that there is sufficient room for the brk. */
302
303 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
304 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
305 +
306 +#ifdef CONFIG_PAX_ASLR
307 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
308 +
309 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
310 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
311 +#endif
312
313 /* When the program starts, a1 contains a pointer to a function to be
314 registered with atexit, as per the SVR4 ABI. A value of 0 means we
315 diff -urNp linux-2.6.32.42/arch/arm/include/asm/kmap_types.h linux-2.6.32.42/arch/arm/include/asm/kmap_types.h
316 --- linux-2.6.32.42/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
317 +++ linux-2.6.32.42/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
318 @@ -19,6 +19,7 @@ enum km_type {
319 KM_SOFTIRQ0,
320 KM_SOFTIRQ1,
321 KM_L2_CACHE,
322 + KM_CLEARPAGE,
323 KM_TYPE_NR
324 };
325
326 diff -urNp linux-2.6.32.42/arch/arm/include/asm/uaccess.h linux-2.6.32.42/arch/arm/include/asm/uaccess.h
327 --- linux-2.6.32.42/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
328 +++ linux-2.6.32.42/arch/arm/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
329 @@ -403,6 +403,9 @@ extern unsigned long __must_check __strn
330
331 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
332 {
333 + if ((long)n < 0)
334 + return n;
335 +
336 if (access_ok(VERIFY_READ, from, n))
337 n = __copy_from_user(to, from, n);
338 else /* security hole - plug it */
339 @@ -412,6 +415,9 @@ static inline unsigned long __must_check
340
341 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
342 {
343 + if ((long)n < 0)
344 + return n;
345 +
346 if (access_ok(VERIFY_WRITE, to, n))
347 n = __copy_to_user(to, from, n);
348 return n;
349 diff -urNp linux-2.6.32.42/arch/arm/kernel/kgdb.c linux-2.6.32.42/arch/arm/kernel/kgdb.c
350 --- linux-2.6.32.42/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
351 +++ linux-2.6.32.42/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
352 @@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
353 * and we handle the normal undef case within the do_undefinstr
354 * handler.
355 */
356 -struct kgdb_arch arch_kgdb_ops = {
357 +const struct kgdb_arch arch_kgdb_ops = {
358 #ifndef __ARMEB__
359 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
360 #else /* ! __ARMEB__ */
361 diff -urNp linux-2.6.32.42/arch/arm/kernel/traps.c linux-2.6.32.42/arch/arm/kernel/traps.c
362 --- linux-2.6.32.42/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
363 +++ linux-2.6.32.42/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
364 @@ -247,6 +247,8 @@ static void __die(const char *str, int e
365
366 DEFINE_SPINLOCK(die_lock);
367
368 +extern void gr_handle_kernel_exploit(void);
369 +
370 /*
371 * This function is protected against re-entrancy.
372 */
373 @@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
374 if (panic_on_oops)
375 panic("Fatal exception");
376
377 + gr_handle_kernel_exploit();
378 +
379 do_exit(SIGSEGV);
380 }
381
382 diff -urNp linux-2.6.32.42/arch/arm/mach-at91/pm.c linux-2.6.32.42/arch/arm/mach-at91/pm.c
383 --- linux-2.6.32.42/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
384 +++ linux-2.6.32.42/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
385 @@ -348,7 +348,7 @@ static void at91_pm_end(void)
386 }
387
388
389 -static struct platform_suspend_ops at91_pm_ops ={
390 +static const struct platform_suspend_ops at91_pm_ops ={
391 .valid = at91_pm_valid_state,
392 .begin = at91_pm_begin,
393 .enter = at91_pm_enter,
394 diff -urNp linux-2.6.32.42/arch/arm/mach-omap1/pm.c linux-2.6.32.42/arch/arm/mach-omap1/pm.c
395 --- linux-2.6.32.42/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
396 +++ linux-2.6.32.42/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
397 @@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
398
399
400
401 -static struct platform_suspend_ops omap_pm_ops ={
402 +static const struct platform_suspend_ops omap_pm_ops ={
403 .prepare = omap_pm_prepare,
404 .enter = omap_pm_enter,
405 .finish = omap_pm_finish,
406 diff -urNp linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c
407 --- linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
408 +++ linux-2.6.32.42/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
409 @@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
410 enable_hlt();
411 }
412
413 -static struct platform_suspend_ops omap_pm_ops = {
414 +static const struct platform_suspend_ops omap_pm_ops = {
415 .prepare = omap2_pm_prepare,
416 .enter = omap2_pm_enter,
417 .finish = omap2_pm_finish,
418 diff -urNp linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c
419 --- linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
420 +++ linux-2.6.32.42/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
421 @@ -401,7 +401,7 @@ static void omap3_pm_end(void)
422 return;
423 }
424
425 -static struct platform_suspend_ops omap_pm_ops = {
426 +static const struct platform_suspend_ops omap_pm_ops = {
427 .begin = omap3_pm_begin,
428 .end = omap3_pm_end,
429 .prepare = omap3_pm_prepare,
430 diff -urNp linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c
431 --- linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
432 +++ linux-2.6.32.42/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
433 @@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
434 (state == PM_SUSPEND_MEM);
435 }
436
437 -static struct platform_suspend_ops pnx4008_pm_ops = {
438 +static const struct platform_suspend_ops pnx4008_pm_ops = {
439 .enter = pnx4008_pm_enter,
440 .valid = pnx4008_pm_valid,
441 };
442 diff -urNp linux-2.6.32.42/arch/arm/mach-pxa/pm.c linux-2.6.32.42/arch/arm/mach-pxa/pm.c
443 --- linux-2.6.32.42/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
444 +++ linux-2.6.32.42/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
445 @@ -95,7 +95,7 @@ void pxa_pm_finish(void)
446 pxa_cpu_pm_fns->finish();
447 }
448
449 -static struct platform_suspend_ops pxa_pm_ops = {
450 +static const struct platform_suspend_ops pxa_pm_ops = {
451 .valid = pxa_pm_valid,
452 .enter = pxa_pm_enter,
453 .prepare = pxa_pm_prepare,
454 diff -urNp linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c
455 --- linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
456 +++ linux-2.6.32.42/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
457 @@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
458 }
459
460 #ifdef CONFIG_PM
461 -static struct platform_suspend_ops sharpsl_pm_ops = {
462 +static const struct platform_suspend_ops sharpsl_pm_ops = {
463 .prepare = pxa_pm_prepare,
464 .finish = pxa_pm_finish,
465 .enter = corgi_pxa_pm_enter,
466 diff -urNp linux-2.6.32.42/arch/arm/mach-sa1100/pm.c linux-2.6.32.42/arch/arm/mach-sa1100/pm.c
467 --- linux-2.6.32.42/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
468 +++ linux-2.6.32.42/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
469 @@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
470 return virt_to_phys(sp);
471 }
472
473 -static struct platform_suspend_ops sa11x0_pm_ops = {
474 +static const struct platform_suspend_ops sa11x0_pm_ops = {
475 .enter = sa11x0_pm_enter,
476 .valid = suspend_valid_only_mem,
477 };
478 diff -urNp linux-2.6.32.42/arch/arm/mm/fault.c linux-2.6.32.42/arch/arm/mm/fault.c
479 --- linux-2.6.32.42/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
480 +++ linux-2.6.32.42/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
481 @@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
482 }
483 #endif
484
485 +#ifdef CONFIG_PAX_PAGEEXEC
486 + if (fsr & FSR_LNX_PF) {
487 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
488 + do_group_exit(SIGKILL);
489 + }
490 +#endif
491 +
492 tsk->thread.address = addr;
493 tsk->thread.error_code = fsr;
494 tsk->thread.trap_no = 14;
495 @@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
496 }
497 #endif /* CONFIG_MMU */
498
499 +#ifdef CONFIG_PAX_PAGEEXEC
500 +void pax_report_insns(void *pc, void *sp)
501 +{
502 + long i;
503 +
504 + printk(KERN_ERR "PAX: bytes at PC: ");
505 + for (i = 0; i < 20; i++) {
506 + unsigned char c;
507 + if (get_user(c, (__force unsigned char __user *)pc+i))
508 + printk(KERN_CONT "?? ");
509 + else
510 + printk(KERN_CONT "%02x ", c);
511 + }
512 + printk("\n");
513 +
514 + printk(KERN_ERR "PAX: bytes at SP-4: ");
515 + for (i = -1; i < 20; i++) {
516 + unsigned long c;
517 + if (get_user(c, (__force unsigned long __user *)sp+i))
518 + printk(KERN_CONT "???????? ");
519 + else
520 + printk(KERN_CONT "%08lx ", c);
521 + }
522 + printk("\n");
523 +}
524 +#endif
525 +
526 /*
527 * First Level Translation Fault Handler
528 *
529 diff -urNp linux-2.6.32.42/arch/arm/mm/mmap.c linux-2.6.32.42/arch/arm/mm/mmap.c
530 --- linux-2.6.32.42/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
531 +++ linux-2.6.32.42/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
532 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
533 if (len > TASK_SIZE)
534 return -ENOMEM;
535
536 +#ifdef CONFIG_PAX_RANDMMAP
537 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
538 +#endif
539 +
540 if (addr) {
541 if (do_align)
542 addr = COLOUR_ALIGN(addr, pgoff);
543 @@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
544 addr = PAGE_ALIGN(addr);
545
546 vma = find_vma(mm, addr);
547 - if (TASK_SIZE - len >= addr &&
548 - (!vma || addr + len <= vma->vm_start))
549 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
550 return addr;
551 }
552 if (len > mm->cached_hole_size) {
553 - start_addr = addr = mm->free_area_cache;
554 + start_addr = addr = mm->free_area_cache;
555 } else {
556 - start_addr = addr = TASK_UNMAPPED_BASE;
557 - mm->cached_hole_size = 0;
558 + start_addr = addr = mm->mmap_base;
559 + mm->cached_hole_size = 0;
560 }
561
562 full_search:
563 @@ -94,14 +97,14 @@ full_search:
564 * Start a new search - just in case we missed
565 * some holes.
566 */
567 - if (start_addr != TASK_UNMAPPED_BASE) {
568 - start_addr = addr = TASK_UNMAPPED_BASE;
569 + if (start_addr != mm->mmap_base) {
570 + start_addr = addr = mm->mmap_base;
571 mm->cached_hole_size = 0;
572 goto full_search;
573 }
574 return -ENOMEM;
575 }
576 - if (!vma || addr + len <= vma->vm_start) {
577 + if (check_heap_stack_gap(vma, addr, len)) {
578 /*
579 * Remember the place where we stopped the search:
580 */
581 diff -urNp linux-2.6.32.42/arch/arm/plat-s3c/pm.c linux-2.6.32.42/arch/arm/plat-s3c/pm.c
582 --- linux-2.6.32.42/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
583 +++ linux-2.6.32.42/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
584 @@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
585 s3c_pm_check_cleanup();
586 }
587
588 -static struct platform_suspend_ops s3c_pm_ops = {
589 +static const struct platform_suspend_ops s3c_pm_ops = {
590 .enter = s3c_pm_enter,
591 .prepare = s3c_pm_prepare,
592 .finish = s3c_pm_finish,
593 diff -urNp linux-2.6.32.42/arch/avr32/include/asm/elf.h linux-2.6.32.42/arch/avr32/include/asm/elf.h
594 --- linux-2.6.32.42/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
595 +++ linux-2.6.32.42/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
596 @@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
597 the loader. We need to make sure that it is out of the way of the program
598 that it will "exec", and that there is sufficient room for the brk. */
599
600 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
601 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
602
603 +#ifdef CONFIG_PAX_ASLR
604 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
605 +
606 +#define PAX_DELTA_MMAP_LEN 15
607 +#define PAX_DELTA_STACK_LEN 15
608 +#endif
609
610 /* This yields a mask that user programs can use to figure out what
611 instruction set this CPU supports. This could be done in user space,
612 diff -urNp linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h
613 --- linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
614 +++ linux-2.6.32.42/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
615 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
616 D(11) KM_IRQ1,
617 D(12) KM_SOFTIRQ0,
618 D(13) KM_SOFTIRQ1,
619 -D(14) KM_TYPE_NR
620 +D(14) KM_CLEARPAGE,
621 +D(15) KM_TYPE_NR
622 };
623
624 #undef D
625 diff -urNp linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c
626 --- linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
627 +++ linux-2.6.32.42/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
628 @@ -176,7 +176,7 @@ out:
629 return 0;
630 }
631
632 -static struct platform_suspend_ops avr32_pm_ops = {
633 +static const struct platform_suspend_ops avr32_pm_ops = {
634 .valid = avr32_pm_valid_state,
635 .enter = avr32_pm_enter,
636 };
637 diff -urNp linux-2.6.32.42/arch/avr32/mm/fault.c linux-2.6.32.42/arch/avr32/mm/fault.c
638 --- linux-2.6.32.42/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
639 +++ linux-2.6.32.42/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
640 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
641
642 int exception_trace = 1;
643
644 +#ifdef CONFIG_PAX_PAGEEXEC
645 +void pax_report_insns(void *pc, void *sp)
646 +{
647 + unsigned long i;
648 +
649 + printk(KERN_ERR "PAX: bytes at PC: ");
650 + for (i = 0; i < 20; i++) {
651 + unsigned char c;
652 + if (get_user(c, (unsigned char *)pc+i))
653 + printk(KERN_CONT "???????? ");
654 + else
655 + printk(KERN_CONT "%02x ", c);
656 + }
657 + printk("\n");
658 +}
659 +#endif
660 +
661 /*
662 * This routine handles page faults. It determines the address and the
663 * problem, and then passes it off to one of the appropriate routines.
664 @@ -157,6 +174,16 @@ bad_area:
665 up_read(&mm->mmap_sem);
666
667 if (user_mode(regs)) {
668 +
669 +#ifdef CONFIG_PAX_PAGEEXEC
670 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
671 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
672 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
673 + do_group_exit(SIGKILL);
674 + }
675 + }
676 +#endif
677 +
678 if (exception_trace && printk_ratelimit())
679 printk("%s%s[%d]: segfault at %08lx pc %08lx "
680 "sp %08lx ecr %lu\n",
681 diff -urNp linux-2.6.32.42/arch/blackfin/kernel/kgdb.c linux-2.6.32.42/arch/blackfin/kernel/kgdb.c
682 --- linux-2.6.32.42/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
683 +++ linux-2.6.32.42/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
684 @@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
685 return -1; /* this means that we do not want to exit from the handler */
686 }
687
688 -struct kgdb_arch arch_kgdb_ops = {
689 +const struct kgdb_arch arch_kgdb_ops = {
690 .gdb_bpt_instr = {0xa1},
691 #ifdef CONFIG_SMP
692 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
693 diff -urNp linux-2.6.32.42/arch/blackfin/mach-common/pm.c linux-2.6.32.42/arch/blackfin/mach-common/pm.c
694 --- linux-2.6.32.42/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
695 +++ linux-2.6.32.42/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
696 @@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
697 return 0;
698 }
699
700 -struct platform_suspend_ops bfin_pm_ops = {
701 +const struct platform_suspend_ops bfin_pm_ops = {
702 .enter = bfin_pm_enter,
703 .valid = bfin_pm_valid,
704 };
705 diff -urNp linux-2.6.32.42/arch/frv/include/asm/kmap_types.h linux-2.6.32.42/arch/frv/include/asm/kmap_types.h
706 --- linux-2.6.32.42/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
707 +++ linux-2.6.32.42/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
708 @@ -23,6 +23,7 @@ enum km_type {
709 KM_IRQ1,
710 KM_SOFTIRQ0,
711 KM_SOFTIRQ1,
712 + KM_CLEARPAGE,
713 KM_TYPE_NR
714 };
715
716 diff -urNp linux-2.6.32.42/arch/frv/mm/elf-fdpic.c linux-2.6.32.42/arch/frv/mm/elf-fdpic.c
717 --- linux-2.6.32.42/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
718 +++ linux-2.6.32.42/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
719 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
720 if (addr) {
721 addr = PAGE_ALIGN(addr);
722 vma = find_vma(current->mm, addr);
723 - if (TASK_SIZE - len >= addr &&
724 - (!vma || addr + len <= vma->vm_start))
725 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
726 goto success;
727 }
728
729 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
730 for (; vma; vma = vma->vm_next) {
731 if (addr > limit)
732 break;
733 - if (addr + len <= vma->vm_start)
734 + if (check_heap_stack_gap(vma, addr, len))
735 goto success;
736 addr = vma->vm_end;
737 }
738 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742 - if (addr + len <= vma->vm_start)
743 + if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747 diff -urNp linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c
748 --- linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
749 +++ linux-2.6.32.42/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
750 @@ -17,7 +17,7 @@
751 #include <linux/swiotlb.h>
752 #include <asm/machvec.h>
753
754 -extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
755 +extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
756
757 /* swiotlb declarations & definitions: */
758 extern int swiotlb_late_init_with_default_size (size_t size);
759 @@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
760 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
761 }
762
763 -struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
764 +const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
765 {
766 if (use_swiotlb(dev))
767 return &swiotlb_dma_ops;
768 diff -urNp linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c
769 --- linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
770 +++ linux-2.6.32.42/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
771 @@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
772 },
773 };
774
775 -extern struct dma_map_ops swiotlb_dma_ops;
776 +extern const struct dma_map_ops swiotlb_dma_ops;
777
778 static int __init
779 sba_init(void)
780 @@ -2211,7 +2211,7 @@ sba_page_override(char *str)
781
782 __setup("sbapagesize=",sba_page_override);
783
784 -struct dma_map_ops sba_dma_ops = {
785 +const struct dma_map_ops sba_dma_ops = {
786 .alloc_coherent = sba_alloc_coherent,
787 .free_coherent = sba_free_coherent,
788 .map_page = sba_map_page,
789 diff -urNp linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c
790 --- linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
791 +++ linux-2.6.32.42/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
792 @@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
793
794 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
795
796 +#ifdef CONFIG_PAX_ASLR
797 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
798 +
799 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
800 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
801 +#endif
802 +
803 /* Ugly but avoids duplication */
804 #include "../../../fs/binfmt_elf.c"
805
806 diff -urNp linux-2.6.32.42/arch/ia64/ia32/ia32priv.h linux-2.6.32.42/arch/ia64/ia32/ia32priv.h
807 --- linux-2.6.32.42/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
808 +++ linux-2.6.32.42/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
809 @@ -296,7 +296,14 @@ typedef struct compat_siginfo {
810 #define ELF_DATA ELFDATA2LSB
811 #define ELF_ARCH EM_386
812
813 -#define IA32_STACK_TOP IA32_PAGE_OFFSET
814 +#ifdef CONFIG_PAX_RANDUSTACK
815 +#define __IA32_DELTA_STACK (current->mm->delta_stack)
816 +#else
817 +#define __IA32_DELTA_STACK 0UL
818 +#endif
819 +
820 +#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
821 +
822 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
823 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
824
825 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h
826 --- linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
827 +++ linux-2.6.32.42/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
828 @@ -12,7 +12,7 @@
829
830 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
831
832 -extern struct dma_map_ops *dma_ops;
833 +extern const struct dma_map_ops *dma_ops;
834 extern struct ia64_machine_vector ia64_mv;
835 extern void set_iommu_machvec(void);
836
837 @@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
838 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
839 dma_addr_t *daddr, gfp_t gfp)
840 {
841 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
842 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
843 void *caddr;
844
845 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
846 @@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
847 static inline void dma_free_coherent(struct device *dev, size_t size,
848 void *caddr, dma_addr_t daddr)
849 {
850 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
851 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
852 debug_dma_free_coherent(dev, size, caddr, daddr);
853 ops->free_coherent(dev, size, caddr, daddr);
854 }
855 @@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
856
857 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
858 {
859 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
860 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
861 return ops->mapping_error(dev, daddr);
862 }
863
864 static inline int dma_supported(struct device *dev, u64 mask)
865 {
866 - struct dma_map_ops *ops = platform_dma_get_ops(dev);
867 + const struct dma_map_ops *ops = platform_dma_get_ops(dev);
868 return ops->dma_supported(dev, mask);
869 }
870
871 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/elf.h linux-2.6.32.42/arch/ia64/include/asm/elf.h
872 --- linux-2.6.32.42/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
873 +++ linux-2.6.32.42/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
874 @@ -43,6 +43,13 @@
875 */
876 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
877
878 +#ifdef CONFIG_PAX_ASLR
879 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
880 +
881 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
882 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
883 +#endif
884 +
885 #define PT_IA_64_UNWIND 0x70000001
886
887 /* IA-64 relocations: */
888 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/machvec.h linux-2.6.32.42/arch/ia64/include/asm/machvec.h
889 --- linux-2.6.32.42/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
890 +++ linux-2.6.32.42/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
891 @@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
892 /* DMA-mapping interface: */
893 typedef void ia64_mv_dma_init (void);
894 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
895 -typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
896 +typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
897
898 /*
899 * WARNING: The legacy I/O space is _architected_. Platforms are
900 @@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
901 # endif /* CONFIG_IA64_GENERIC */
902
903 extern void swiotlb_dma_init(void);
904 -extern struct dma_map_ops *dma_get_ops(struct device *);
905 +extern const struct dma_map_ops *dma_get_ops(struct device *);
906
907 /*
908 * Define default versions so we can extend machvec for new platforms without having
909 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/pgtable.h linux-2.6.32.42/arch/ia64/include/asm/pgtable.h
910 --- linux-2.6.32.42/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
911 +++ linux-2.6.32.42/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
912 @@ -12,7 +12,7 @@
913 * David Mosberger-Tang <davidm@hpl.hp.com>
914 */
915
916 -
917 +#include <linux/const.h>
918 #include <asm/mman.h>
919 #include <asm/page.h>
920 #include <asm/processor.h>
921 @@ -143,6 +143,17 @@
922 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
923 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
924 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
925 +
926 +#ifdef CONFIG_PAX_PAGEEXEC
927 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
928 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
929 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
930 +#else
931 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
932 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
933 +# define PAGE_COPY_NOEXEC PAGE_COPY
934 +#endif
935 +
936 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
937 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
938 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
939 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/spinlock.h linux-2.6.32.42/arch/ia64/include/asm/spinlock.h
940 --- linux-2.6.32.42/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
941 +++ linux-2.6.32.42/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
942 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
943 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
944
945 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
946 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
947 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
948 }
949
950 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
951 diff -urNp linux-2.6.32.42/arch/ia64/include/asm/uaccess.h linux-2.6.32.42/arch/ia64/include/asm/uaccess.h
952 --- linux-2.6.32.42/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
953 +++ linux-2.6.32.42/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
954 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
955 const void *__cu_from = (from); \
956 long __cu_len = (n); \
957 \
958 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
959 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
960 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
961 __cu_len; \
962 })
963 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
964 long __cu_len = (n); \
965 \
966 __chk_user_ptr(__cu_from); \
967 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
968 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
969 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
970 __cu_len; \
971 })
972 diff -urNp linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c
973 --- linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
974 +++ linux-2.6.32.42/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
975 @@ -3,7 +3,7 @@
976 /* Set this to 1 if there is a HW IOMMU in the system */
977 int iommu_detected __read_mostly;
978
979 -struct dma_map_ops *dma_ops;
980 +const struct dma_map_ops *dma_ops;
981 EXPORT_SYMBOL(dma_ops);
982
983 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
984 @@ -16,7 +16,7 @@ static int __init dma_init(void)
985 }
986 fs_initcall(dma_init);
987
988 -struct dma_map_ops *dma_get_ops(struct device *dev)
989 +const struct dma_map_ops *dma_get_ops(struct device *dev)
990 {
991 return dma_ops;
992 }
993 diff -urNp linux-2.6.32.42/arch/ia64/kernel/module.c linux-2.6.32.42/arch/ia64/kernel/module.c
994 --- linux-2.6.32.42/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
995 +++ linux-2.6.32.42/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
996 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
997 void
998 module_free (struct module *mod, void *module_region)
999 {
1000 - if (mod && mod->arch.init_unw_table &&
1001 - module_region == mod->module_init) {
1002 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1003 unw_remove_unwind_table(mod->arch.init_unw_table);
1004 mod->arch.init_unw_table = NULL;
1005 }
1006 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1007 }
1008
1009 static inline int
1010 +in_init_rx (const struct module *mod, uint64_t addr)
1011 +{
1012 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1013 +}
1014 +
1015 +static inline int
1016 +in_init_rw (const struct module *mod, uint64_t addr)
1017 +{
1018 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1019 +}
1020 +
1021 +static inline int
1022 in_init (const struct module *mod, uint64_t addr)
1023 {
1024 - return addr - (uint64_t) mod->module_init < mod->init_size;
1025 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1026 +}
1027 +
1028 +static inline int
1029 +in_core_rx (const struct module *mod, uint64_t addr)
1030 +{
1031 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1032 +}
1033 +
1034 +static inline int
1035 +in_core_rw (const struct module *mod, uint64_t addr)
1036 +{
1037 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1038 }
1039
1040 static inline int
1041 in_core (const struct module *mod, uint64_t addr)
1042 {
1043 - return addr - (uint64_t) mod->module_core < mod->core_size;
1044 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1045 }
1046
1047 static inline int
1048 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1049 break;
1050
1051 case RV_BDREL:
1052 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1053 + if (in_init_rx(mod, val))
1054 + val -= (uint64_t) mod->module_init_rx;
1055 + else if (in_init_rw(mod, val))
1056 + val -= (uint64_t) mod->module_init_rw;
1057 + else if (in_core_rx(mod, val))
1058 + val -= (uint64_t) mod->module_core_rx;
1059 + else if (in_core_rw(mod, val))
1060 + val -= (uint64_t) mod->module_core_rw;
1061 break;
1062
1063 case RV_LTV:
1064 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1065 * addresses have been selected...
1066 */
1067 uint64_t gp;
1068 - if (mod->core_size > MAX_LTOFF)
1069 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1070 /*
1071 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1072 * at the end of the module.
1073 */
1074 - gp = mod->core_size - MAX_LTOFF / 2;
1075 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1076 else
1077 - gp = mod->core_size / 2;
1078 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1079 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1080 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1081 mod->arch.gp = gp;
1082 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1083 }
1084 diff -urNp linux-2.6.32.42/arch/ia64/kernel/pci-dma.c linux-2.6.32.42/arch/ia64/kernel/pci-dma.c
1085 --- linux-2.6.32.42/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1086 +++ linux-2.6.32.42/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1087 @@ -43,7 +43,7 @@ struct device fallback_dev = {
1088 .dma_mask = &fallback_dev.coherent_dma_mask,
1089 };
1090
1091 -extern struct dma_map_ops intel_dma_ops;
1092 +extern const struct dma_map_ops intel_dma_ops;
1093
1094 static int __init pci_iommu_init(void)
1095 {
1096 @@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1097 }
1098 EXPORT_SYMBOL(iommu_dma_supported);
1099
1100 +extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1101 +extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1102 +extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1103 +extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1104 +extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1105 +extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1106 +extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1107 +
1108 +static const struct dma_map_ops intel_iommu_dma_ops = {
1109 + /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1110 + .alloc_coherent = intel_alloc_coherent,
1111 + .free_coherent = intel_free_coherent,
1112 + .map_sg = intel_map_sg,
1113 + .unmap_sg = intel_unmap_sg,
1114 + .map_page = intel_map_page,
1115 + .unmap_page = intel_unmap_page,
1116 + .mapping_error = intel_mapping_error,
1117 +
1118 + .sync_single_for_cpu = machvec_dma_sync_single,
1119 + .sync_sg_for_cpu = machvec_dma_sync_sg,
1120 + .sync_single_for_device = machvec_dma_sync_single,
1121 + .sync_sg_for_device = machvec_dma_sync_sg,
1122 + .dma_supported = iommu_dma_supported,
1123 +};
1124 +
1125 void __init pci_iommu_alloc(void)
1126 {
1127 - dma_ops = &intel_dma_ops;
1128 -
1129 - dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1130 - dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1131 - dma_ops->sync_single_for_device = machvec_dma_sync_single;
1132 - dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1133 - dma_ops->dma_supported = iommu_dma_supported;
1134 + dma_ops = &intel_iommu_dma_ops;
1135
1136 /*
1137 * The order of these functions is important for
1138 diff -urNp linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c
1139 --- linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1140 +++ linux-2.6.32.42/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1141 @@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1142 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1143 }
1144
1145 -struct dma_map_ops swiotlb_dma_ops = {
1146 +const struct dma_map_ops swiotlb_dma_ops = {
1147 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1148 .free_coherent = swiotlb_free_coherent,
1149 .map_page = swiotlb_map_page,
1150 diff -urNp linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c
1151 --- linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1152 +++ linux-2.6.32.42/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1153 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1154 if (REGION_NUMBER(addr) == RGN_HPAGE)
1155 addr = 0;
1156 #endif
1157 +
1158 +#ifdef CONFIG_PAX_RANDMMAP
1159 + if (mm->pax_flags & MF_PAX_RANDMMAP)
1160 + addr = mm->free_area_cache;
1161 + else
1162 +#endif
1163 +
1164 if (!addr)
1165 addr = mm->free_area_cache;
1166
1167 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1168 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1169 /* At this point: (!vma || addr < vma->vm_end). */
1170 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1171 - if (start_addr != TASK_UNMAPPED_BASE) {
1172 + if (start_addr != mm->mmap_base) {
1173 /* Start a new search --- just in case we missed some holes. */
1174 - addr = TASK_UNMAPPED_BASE;
1175 + addr = mm->mmap_base;
1176 goto full_search;
1177 }
1178 return -ENOMEM;
1179 }
1180 - if (!vma || addr + len <= vma->vm_start) {
1181 + if (check_heap_stack_gap(vma, addr, len)) {
1182 /* Remember the address where we stopped this search: */
1183 mm->free_area_cache = addr + len;
1184 return addr;
1185 diff -urNp linux-2.6.32.42/arch/ia64/kernel/topology.c linux-2.6.32.42/arch/ia64/kernel/topology.c
1186 --- linux-2.6.32.42/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1187 +++ linux-2.6.32.42/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1188 @@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1189 return ret;
1190 }
1191
1192 -static struct sysfs_ops cache_sysfs_ops = {
1193 +static const struct sysfs_ops cache_sysfs_ops = {
1194 .show = cache_show
1195 };
1196
1197 diff -urNp linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S
1198 --- linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1199 +++ linux-2.6.32.42/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1200 @@ -190,7 +190,7 @@ SECTIONS
1201 /* Per-cpu data: */
1202 . = ALIGN(PERCPU_PAGE_SIZE);
1203 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1204 - __phys_per_cpu_start = __per_cpu_load;
1205 + __phys_per_cpu_start = per_cpu_load;
1206 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1207 * into percpu page size
1208 */
1209 diff -urNp linux-2.6.32.42/arch/ia64/mm/fault.c linux-2.6.32.42/arch/ia64/mm/fault.c
1210 --- linux-2.6.32.42/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1211 +++ linux-2.6.32.42/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1212 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1213 return pte_present(pte);
1214 }
1215
1216 +#ifdef CONFIG_PAX_PAGEEXEC
1217 +void pax_report_insns(void *pc, void *sp)
1218 +{
1219 + unsigned long i;
1220 +
1221 + printk(KERN_ERR "PAX: bytes at PC: ");
1222 + for (i = 0; i < 8; i++) {
1223 + unsigned int c;
1224 + if (get_user(c, (unsigned int *)pc+i))
1225 + printk(KERN_CONT "???????? ");
1226 + else
1227 + printk(KERN_CONT "%08x ", c);
1228 + }
1229 + printk("\n");
1230 +}
1231 +#endif
1232 +
1233 void __kprobes
1234 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1235 {
1236 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1237 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1238 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1239
1240 - if ((vma->vm_flags & mask) != mask)
1241 + if ((vma->vm_flags & mask) != mask) {
1242 +
1243 +#ifdef CONFIG_PAX_PAGEEXEC
1244 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1245 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1246 + goto bad_area;
1247 +
1248 + up_read(&mm->mmap_sem);
1249 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1250 + do_group_exit(SIGKILL);
1251 + }
1252 +#endif
1253 +
1254 goto bad_area;
1255
1256 + }
1257 +
1258 survive:
1259 /*
1260 * If for any reason at all we couldn't handle the fault, make
1261 diff -urNp linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c
1262 --- linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1263 +++ linux-2.6.32.42/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1264 @@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1265 /* At this point: (!vmm || addr < vmm->vm_end). */
1266 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1267 return -ENOMEM;
1268 - if (!vmm || (addr + len) <= vmm->vm_start)
1269 + if (check_heap_stack_gap(vmm, addr, len))
1270 return addr;
1271 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1272 }
1273 diff -urNp linux-2.6.32.42/arch/ia64/mm/init.c linux-2.6.32.42/arch/ia64/mm/init.c
1274 --- linux-2.6.32.42/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1275 +++ linux-2.6.32.42/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1276 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1277 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1278 vma->vm_end = vma->vm_start + PAGE_SIZE;
1279 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1280 +
1281 +#ifdef CONFIG_PAX_PAGEEXEC
1282 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1283 + vma->vm_flags &= ~VM_EXEC;
1284 +
1285 +#ifdef CONFIG_PAX_MPROTECT
1286 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1287 + vma->vm_flags &= ~VM_MAYEXEC;
1288 +#endif
1289 +
1290 + }
1291 +#endif
1292 +
1293 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1294 down_write(&current->mm->mmap_sem);
1295 if (insert_vm_struct(current->mm, vma)) {
1296 diff -urNp linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c
1297 --- linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1298 +++ linux-2.6.32.42/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1299 @@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1300 return ret;
1301 }
1302
1303 -static struct dma_map_ops sn_dma_ops = {
1304 +static const struct dma_map_ops sn_dma_ops = {
1305 .alloc_coherent = sn_dma_alloc_coherent,
1306 .free_coherent = sn_dma_free_coherent,
1307 .map_page = sn_dma_map_page,
1308 diff -urNp linux-2.6.32.42/arch/m32r/lib/usercopy.c linux-2.6.32.42/arch/m32r/lib/usercopy.c
1309 --- linux-2.6.32.42/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1310 +++ linux-2.6.32.42/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1311 @@ -14,6 +14,9 @@
1312 unsigned long
1313 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1314 {
1315 + if ((long)n < 0)
1316 + return n;
1317 +
1318 prefetch(from);
1319 if (access_ok(VERIFY_WRITE, to, n))
1320 __copy_user(to,from,n);
1321 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1322 unsigned long
1323 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1324 {
1325 + if ((long)n < 0)
1326 + return n;
1327 +
1328 prefetchw(to);
1329 if (access_ok(VERIFY_READ, from, n))
1330 __copy_user_zeroing(to,from,n);
1331 diff -urNp linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c
1332 --- linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1333 +++ linux-2.6.32.42/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1334 @@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1335
1336 }
1337
1338 -static struct platform_suspend_ops db1x_pm_ops = {
1339 +static const struct platform_suspend_ops db1x_pm_ops = {
1340 .valid = suspend_valid_only_mem,
1341 .begin = db1x_pm_begin,
1342 .enter = db1x_pm_enter,
1343 diff -urNp linux-2.6.32.42/arch/mips/include/asm/elf.h linux-2.6.32.42/arch/mips/include/asm/elf.h
1344 --- linux-2.6.32.42/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1345 +++ linux-2.6.32.42/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1346 @@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1347 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1348 #endif
1349
1350 +#ifdef CONFIG_PAX_ASLR
1351 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1352 +
1353 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1354 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1355 +#endif
1356 +
1357 #endif /* _ASM_ELF_H */
1358 diff -urNp linux-2.6.32.42/arch/mips/include/asm/page.h linux-2.6.32.42/arch/mips/include/asm/page.h
1359 --- linux-2.6.32.42/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1360 +++ linux-2.6.32.42/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1361 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1362 #ifdef CONFIG_CPU_MIPS32
1363 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1364 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1365 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1366 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1367 #else
1368 typedef struct { unsigned long long pte; } pte_t;
1369 #define pte_val(x) ((x).pte)
1370 diff -urNp linux-2.6.32.42/arch/mips/include/asm/system.h linux-2.6.32.42/arch/mips/include/asm/system.h
1371 --- linux-2.6.32.42/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1372 +++ linux-2.6.32.42/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1373 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1374 */
1375 #define __ARCH_WANT_UNLOCKED_CTXSW
1376
1377 -extern unsigned long arch_align_stack(unsigned long sp);
1378 +#define arch_align_stack(x) ((x) & ~0xfUL)
1379
1380 #endif /* _ASM_SYSTEM_H */
1381 diff -urNp linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c
1382 --- linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1383 +++ linux-2.6.32.42/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1384 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1385 #undef ELF_ET_DYN_BASE
1386 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1387
1388 +#ifdef CONFIG_PAX_ASLR
1389 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1390 +
1391 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1392 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1393 +#endif
1394 +
1395 #include <asm/processor.h>
1396 #include <linux/module.h>
1397 #include <linux/elfcore.h>
1398 diff -urNp linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c
1399 --- linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1400 +++ linux-2.6.32.42/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1401 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1402 #undef ELF_ET_DYN_BASE
1403 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1404
1405 +#ifdef CONFIG_PAX_ASLR
1406 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1407 +
1408 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1409 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1410 +#endif
1411 +
1412 #include <asm/processor.h>
1413
1414 /*
1415 diff -urNp linux-2.6.32.42/arch/mips/kernel/kgdb.c linux-2.6.32.42/arch/mips/kernel/kgdb.c
1416 --- linux-2.6.32.42/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1417 +++ linux-2.6.32.42/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1418 @@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1419 return -1;
1420 }
1421
1422 +/* cannot be const */
1423 struct kgdb_arch arch_kgdb_ops;
1424
1425 /*
1426 diff -urNp linux-2.6.32.42/arch/mips/kernel/process.c linux-2.6.32.42/arch/mips/kernel/process.c
1427 --- linux-2.6.32.42/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1428 +++ linux-2.6.32.42/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1429 @@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1430 out:
1431 return pc;
1432 }
1433 -
1434 -/*
1435 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1436 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1437 - */
1438 -unsigned long arch_align_stack(unsigned long sp)
1439 -{
1440 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1441 - sp -= get_random_int() & ~PAGE_MASK;
1442 -
1443 - return sp & ALMASK;
1444 -}
1445 diff -urNp linux-2.6.32.42/arch/mips/kernel/syscall.c linux-2.6.32.42/arch/mips/kernel/syscall.c
1446 --- linux-2.6.32.42/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1447 +++ linux-2.6.32.42/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1448 @@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1449 do_color_align = 0;
1450 if (filp || (flags & MAP_SHARED))
1451 do_color_align = 1;
1452 +
1453 +#ifdef CONFIG_PAX_RANDMMAP
1454 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1455 +#endif
1456 +
1457 if (addr) {
1458 if (do_color_align)
1459 addr = COLOUR_ALIGN(addr, pgoff);
1460 else
1461 addr = PAGE_ALIGN(addr);
1462 vmm = find_vma(current->mm, addr);
1463 - if (task_size - len >= addr &&
1464 - (!vmm || addr + len <= vmm->vm_start))
1465 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1466 return addr;
1467 }
1468 - addr = TASK_UNMAPPED_BASE;
1469 + addr = current->mm->mmap_base;
1470 if (do_color_align)
1471 addr = COLOUR_ALIGN(addr, pgoff);
1472 else
1473 @@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1474 /* At this point: (!vmm || addr < vmm->vm_end). */
1475 if (task_size - len < addr)
1476 return -ENOMEM;
1477 - if (!vmm || addr + len <= vmm->vm_start)
1478 + if (check_heap_stack_gap(vmm, addr, len))
1479 return addr;
1480 addr = vmm->vm_end;
1481 if (do_color_align)
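
Note: check_heap_stack_gap() replaces the open-coded "!vmm || addr + len <= vmm->vm_start" test wherever arch_get_unmapped_area() probes a candidate address; beyond the plain overlap check it keeps a guard gap below a stack VMA that grows down. The helper itself is defined elsewhere in the patch — the fragment below is only a simplified sketch of that idea, with a fixed gap size and a stripped-down vma structure.

#define PAGE_SIZE_X    4096UL
#define GUARD_PAGES_X  64UL		/* illustrative; the real gap is tunable */
#define VM_GROWSDOWN_X 0x0100UL

struct vma_x {
	unsigned long vm_start;
	unsigned long vm_flags;
};

/* Non-zero when [addr, addr+len) fits below the next VMA, leaving a guard
 * gap if that VMA is a downward-growing stack. */
static int heap_stack_gap_ok(const struct vma_x *vma, unsigned long addr,
			     unsigned long len)
{
	unsigned long gap = 0;

	if (!vma)
		return 1;
	if (vma->vm_flags & VM_GROWSDOWN_X)
		gap = GUARD_PAGES_X * PAGE_SIZE_X;
	return addr + len + gap <= vma->vm_start;
}
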
1482 diff -urNp linux-2.6.32.42/arch/mips/mm/fault.c linux-2.6.32.42/arch/mips/mm/fault.c
1483 --- linux-2.6.32.42/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1484 +++ linux-2.6.32.42/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1485 @@ -26,6 +26,23 @@
1486 #include <asm/ptrace.h>
1487 #include <asm/highmem.h> /* For VMALLOC_END */
1488
1489 +#ifdef CONFIG_PAX_PAGEEXEC
1490 +void pax_report_insns(void *pc, void *sp)
1491 +{
1492 + unsigned long i;
1493 +
1494 + printk(KERN_ERR "PAX: bytes at PC: ");
1495 + for (i = 0; i < 5; i++) {
1496 + unsigned int c;
1497 + if (get_user(c, (unsigned int *)pc+i))
1498 + printk(KERN_CONT "???????? ");
1499 + else
1500 + printk(KERN_CONT "%08x ", c);
1501 + }
1502 + printk("\n");
1503 +}
1504 +#endif
1505 +
1506 /*
1507 * This routine handles page faults. It determines the address,
1508 * and the problem, and then passes it off to one of the appropriate
1509 diff -urNp linux-2.6.32.42/arch/parisc/include/asm/elf.h linux-2.6.32.42/arch/parisc/include/asm/elf.h
1510 --- linux-2.6.32.42/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1511 +++ linux-2.6.32.42/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1512 @@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1513
1514 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1515
1516 +#ifdef CONFIG_PAX_ASLR
1517 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1518 +
1519 +#define PAX_DELTA_MMAP_LEN 16
1520 +#define PAX_DELTA_STACK_LEN 16
1521 +#endif
1522 +
1523 /* This yields a mask that user programs can use to figure out what
1524 instruction set this CPU supports. This could be done in user space,
1525 but it's not easy, and we've already done it here. */
1526 diff -urNp linux-2.6.32.42/arch/parisc/include/asm/pgtable.h linux-2.6.32.42/arch/parisc/include/asm/pgtable.h
1527 --- linux-2.6.32.42/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1528 +++ linux-2.6.32.42/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1529 @@ -207,6 +207,17 @@
1530 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1531 #define PAGE_COPY PAGE_EXECREAD
1532 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1533 +
1534 +#ifdef CONFIG_PAX_PAGEEXEC
1535 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1536 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1537 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1538 +#else
1539 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1540 +# define PAGE_COPY_NOEXEC PAGE_COPY
1541 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1542 +#endif
1543 +
1544 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1545 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1546 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1547 diff -urNp linux-2.6.32.42/arch/parisc/kernel/module.c linux-2.6.32.42/arch/parisc/kernel/module.c
1548 --- linux-2.6.32.42/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1549 +++ linux-2.6.32.42/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1550 @@ -95,16 +95,38 @@
1551
1552 /* three functions to determine where in the module core
1553 * or init pieces the location is */
1554 +static inline int in_init_rx(struct module *me, void *loc)
1555 +{
1556 + return (loc >= me->module_init_rx &&
1557 + loc < (me->module_init_rx + me->init_size_rx));
1558 +}
1559 +
1560 +static inline int in_init_rw(struct module *me, void *loc)
1561 +{
1562 + return (loc >= me->module_init_rw &&
1563 + loc < (me->module_init_rw + me->init_size_rw));
1564 +}
1565 +
1566 static inline int in_init(struct module *me, void *loc)
1567 {
1568 - return (loc >= me->module_init &&
1569 - loc <= (me->module_init + me->init_size));
1570 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1571 +}
1572 +
1573 +static inline int in_core_rx(struct module *me, void *loc)
1574 +{
1575 + return (loc >= me->module_core_rx &&
1576 + loc < (me->module_core_rx + me->core_size_rx));
1577 +}
1578 +
1579 +static inline int in_core_rw(struct module *me, void *loc)
1580 +{
1581 + return (loc >= me->module_core_rw &&
1582 + loc < (me->module_core_rw + me->core_size_rw));
1583 }
1584
1585 static inline int in_core(struct module *me, void *loc)
1586 {
1587 - return (loc >= me->module_core &&
1588 - loc <= (me->module_core + me->core_size));
1589 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1590 }
1591
1592 static inline int in_local(struct module *me, void *loc)
1593 @@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1594 }
1595
1596 /* align things a bit */
1597 - me->core_size = ALIGN(me->core_size, 16);
1598 - me->arch.got_offset = me->core_size;
1599 - me->core_size += gots * sizeof(struct got_entry);
1600 -
1601 - me->core_size = ALIGN(me->core_size, 16);
1602 - me->arch.fdesc_offset = me->core_size;
1603 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1604 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1605 + me->arch.got_offset = me->core_size_rw;
1606 + me->core_size_rw += gots * sizeof(struct got_entry);
1607 +
1608 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1609 + me->arch.fdesc_offset = me->core_size_rw;
1610 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1611
1612 me->arch.got_max = gots;
1613 me->arch.fdesc_max = fdescs;
1614 @@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1615
1616 BUG_ON(value == 0);
1617
1618 - got = me->module_core + me->arch.got_offset;
1619 + got = me->module_core_rw + me->arch.got_offset;
1620 for (i = 0; got[i].addr; i++)
1621 if (got[i].addr == value)
1622 goto out;
1623 @@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1624 #ifdef CONFIG_64BIT
1625 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1626 {
1627 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1628 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1629
1630 if (!value) {
1631 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1632 @@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1633
1634 /* Create new one */
1635 fdesc->addr = value;
1636 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1637 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1638 return (Elf_Addr)fdesc;
1639 }
1640 #endif /* CONFIG_64BIT */
1641 @@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1642
1643 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1644 end = table + sechdrs[me->arch.unwind_section].sh_size;
1645 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1646 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1647
1648 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1649 me->arch.unwind_section, table, end, gp);
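
Note: with the module image split into RX (code) and RW (data) halves, "inside the module?" becomes "inside either half?", and everything that used to live at module_core + offset — the GOT, the function descriptors, the gp value — is carved out of the RW half instead. A compact sketch of the containment idea, with illustrative field names rather than the kernel's struct module:

struct mod_image_x {
	char *core_rx;  unsigned long size_rx;	/* executable half */
	char *core_rw;  unsigned long size_rw;	/* writable half: GOT, fdescs, data */
};

static int in_range_x(const char *base, unsigned long size, const void *p)
{
	const char *q = (const char *)p;
	return q >= base && q < base + size;
}

static int in_core_x(const struct mod_image_x *m, const void *p)
{
	return in_range_x(m->core_rx, m->size_rx, p) ||
	       in_range_x(m->core_rw, m->size_rw, p);
}
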
1650 diff -urNp linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c
1651 --- linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1652 +++ linux-2.6.32.42/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1653 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1654 /* At this point: (!vma || addr < vma->vm_end). */
1655 if (TASK_SIZE - len < addr)
1656 return -ENOMEM;
1657 - if (!vma || addr + len <= vma->vm_start)
1658 + if (check_heap_stack_gap(vma, addr, len))
1659 return addr;
1660 addr = vma->vm_end;
1661 }
1662 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1663 /* At this point: (!vma || addr < vma->vm_end). */
1664 if (TASK_SIZE - len < addr)
1665 return -ENOMEM;
1666 - if (!vma || addr + len <= vma->vm_start)
1667 + if (check_heap_stack_gap(vma, addr, len))
1668 return addr;
1669 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1670 if (addr < vma->vm_end) /* handle wraparound */
1671 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1672 if (flags & MAP_FIXED)
1673 return addr;
1674 if (!addr)
1675 - addr = TASK_UNMAPPED_BASE;
1676 + addr = current->mm->mmap_base;
1677
1678 if (filp) {
1679 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1680 diff -urNp linux-2.6.32.42/arch/parisc/kernel/traps.c linux-2.6.32.42/arch/parisc/kernel/traps.c
1681 --- linux-2.6.32.42/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1682 +++ linux-2.6.32.42/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1683 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1684
1685 down_read(&current->mm->mmap_sem);
1686 vma = find_vma(current->mm,regs->iaoq[0]);
1687 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1688 - && (vma->vm_flags & VM_EXEC)) {
1689 -
1690 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1691 fault_address = regs->iaoq[0];
1692 fault_space = regs->iasq[0];
1693
1694 diff -urNp linux-2.6.32.42/arch/parisc/mm/fault.c linux-2.6.32.42/arch/parisc/mm/fault.c
1695 --- linux-2.6.32.42/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1696 +++ linux-2.6.32.42/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1697 @@ -15,6 +15,7 @@
1698 #include <linux/sched.h>
1699 #include <linux/interrupt.h>
1700 #include <linux/module.h>
1701 +#include <linux/unistd.h>
1702
1703 #include <asm/uaccess.h>
1704 #include <asm/traps.h>
1705 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1706 static unsigned long
1707 parisc_acctyp(unsigned long code, unsigned int inst)
1708 {
1709 - if (code == 6 || code == 16)
1710 + if (code == 6 || code == 7 || code == 16)
1711 return VM_EXEC;
1712
1713 switch (inst & 0xf0000000) {
1714 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1715 }
1716 #endif
1717
1718 +#ifdef CONFIG_PAX_PAGEEXEC
1719 +/*
1720 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1721 + *
1722 + * returns 1 when task should be killed
1723 + * 2 when rt_sigreturn trampoline was detected
1724 + * 3 when unpatched PLT trampoline was detected
1725 + */
1726 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1727 +{
1728 +
1729 +#ifdef CONFIG_PAX_EMUPLT
1730 + int err;
1731 +
1732 + do { /* PaX: unpatched PLT emulation */
1733 + unsigned int bl, depwi;
1734 +
1735 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1736 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1737 +
1738 + if (err)
1739 + break;
1740 +
1741 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1742 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1743 +
1744 + err = get_user(ldw, (unsigned int *)addr);
1745 + err |= get_user(bv, (unsigned int *)(addr+4));
1746 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1747 +
1748 + if (err)
1749 + break;
1750 +
1751 + if (ldw == 0x0E801096U &&
1752 + bv == 0xEAC0C000U &&
1753 + ldw2 == 0x0E881095U)
1754 + {
1755 + unsigned int resolver, map;
1756 +
1757 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1758 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1759 + if (err)
1760 + break;
1761 +
1762 + regs->gr[20] = instruction_pointer(regs)+8;
1763 + regs->gr[21] = map;
1764 + regs->gr[22] = resolver;
1765 + regs->iaoq[0] = resolver | 3UL;
1766 + regs->iaoq[1] = regs->iaoq[0] + 4;
1767 + return 3;
1768 + }
1769 + }
1770 + } while (0);
1771 +#endif
1772 +
1773 +#ifdef CONFIG_PAX_EMUTRAMP
1774 +
1775 +#ifndef CONFIG_PAX_EMUSIGRT
1776 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1777 + return 1;
1778 +#endif
1779 +
1780 + do { /* PaX: rt_sigreturn emulation */
1781 + unsigned int ldi1, ldi2, bel, nop;
1782 +
1783 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1784 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1785 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1786 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1787 +
1788 + if (err)
1789 + break;
1790 +
1791 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1792 + ldi2 == 0x3414015AU &&
1793 + bel == 0xE4008200U &&
1794 + nop == 0x08000240U)
1795 + {
1796 + regs->gr[25] = (ldi1 & 2) >> 1;
1797 + regs->gr[20] = __NR_rt_sigreturn;
1798 + regs->gr[31] = regs->iaoq[1] + 16;
1799 + regs->sr[0] = regs->iasq[1];
1800 + regs->iaoq[0] = 0x100UL;
1801 + regs->iaoq[1] = regs->iaoq[0] + 4;
1802 + regs->iasq[0] = regs->sr[2];
1803 + regs->iasq[1] = regs->sr[2];
1804 + return 2;
1805 + }
1806 + } while (0);
1807 +#endif
1808 +
1809 + return 1;
1810 +}
1811 +
1812 +void pax_report_insns(void *pc, void *sp)
1813 +{
1814 + unsigned long i;
1815 +
1816 + printk(KERN_ERR "PAX: bytes at PC: ");
1817 + for (i = 0; i < 5; i++) {
1818 + unsigned int c;
1819 + if (get_user(c, (unsigned int *)pc+i))
1820 + printk(KERN_CONT "???????? ");
1821 + else
1822 + printk(KERN_CONT "%08x ", c);
1823 + }
1824 + printk("\n");
1825 +}
1826 +#endif
1827 +
1828 int fixup_exception(struct pt_regs *regs)
1829 {
1830 const struct exception_table_entry *fix;
1831 @@ -192,8 +303,33 @@ good_area:
1832
1833 acc_type = parisc_acctyp(code,regs->iir);
1834
1835 - if ((vma->vm_flags & acc_type) != acc_type)
1836 + if ((vma->vm_flags & acc_type) != acc_type) {
1837 +
1838 +#ifdef CONFIG_PAX_PAGEEXEC
1839 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1840 + (address & ~3UL) == instruction_pointer(regs))
1841 + {
1842 + up_read(&mm->mmap_sem);
1843 + switch (pax_handle_fetch_fault(regs)) {
1844 +
1845 +#ifdef CONFIG_PAX_EMUPLT
1846 + case 3:
1847 + return;
1848 +#endif
1849 +
1850 +#ifdef CONFIG_PAX_EMUTRAMP
1851 + case 2:
1852 + return;
1853 +#endif
1854 +
1855 + }
1856 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1857 + do_group_exit(SIGKILL);
1858 + }
1859 +#endif
1860 +
1861 goto bad_area;
1862 + }
1863
1864 /*
1865 * If for any reason at all we couldn't handle the fault, make
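
Note: pax_handle_fetch_fault() above only ever emulates two known instruction sequences read back from the faulting PC — an unpatched PLT stub and the rt_sigreturn trampoline — and everything else ends in a report plus SIGKILL. The sketch below shows just the recognition pattern in isolation (fetch a few words, compare against fixed opcodes); read_word() is a hypothetical fault-safe reader standing in for get_user(), and the opcodes are the parisc values quoted in the hunk.

#include <stdint.h>

/* Hypothetical fault-safe read of one word at a user PC; returns 0 on success. */
static int read_word(const uint32_t *pc, uint32_t *out)
{
	*out = *pc;		/* a kernel version must tolerate a faulting access */
	return 0;
}

/* Match the 4-word rt_sigreturn trampoline recognized above (ldi/ldi/bel/nop). */
static int is_rt_sigreturn_tramp(const uint32_t *pc)
{
	uint32_t w[4];
	unsigned int i;

	for (i = 0; i < 4; i++)
		if (read_word(pc + i, &w[i]))
			return 0;

	return (w[0] == 0x34190000U || w[0] == 0x34190002U) &&
	       w[1] == 0x3414015AU &&
	       w[2] == 0xE4008200U &&
	       w[3] == 0x08000240U;
}
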
1866 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/device.h linux-2.6.32.42/arch/powerpc/include/asm/device.h
1867 --- linux-2.6.32.42/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
1868 +++ linux-2.6.32.42/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
1869 @@ -14,7 +14,7 @@ struct dev_archdata {
1870 struct device_node *of_node;
1871
1872 /* DMA operations on that device */
1873 - struct dma_map_ops *dma_ops;
1874 + const struct dma_map_ops *dma_ops;
1875
1876 /*
1877 * When an iommu is in use, dma_data is used as a ptr to the base of the
1878 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h
1879 --- linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
1880 +++ linux-2.6.32.42/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
1881 @@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
1882 #ifdef CONFIG_PPC64
1883 extern struct dma_map_ops dma_iommu_ops;
1884 #endif
1885 -extern struct dma_map_ops dma_direct_ops;
1886 +extern const struct dma_map_ops dma_direct_ops;
1887
1888 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
1889 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
1890 {
1891 /* We don't handle the NULL dev case for ISA for now. We could
1892 * do it via an out of line call but it is not needed for now. The
1893 @@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
1894 return dev->archdata.dma_ops;
1895 }
1896
1897 -static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
1898 +static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
1899 {
1900 dev->archdata.dma_ops = ops;
1901 }
1902 @@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
1903
1904 static inline int dma_supported(struct device *dev, u64 mask)
1905 {
1906 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1907 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1908
1909 if (unlikely(dma_ops == NULL))
1910 return 0;
1911 @@ -132,7 +132,7 @@ static inline int dma_supported(struct d
1912
1913 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
1914 {
1915 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1916 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1917
1918 if (unlikely(dma_ops == NULL))
1919 return -EIO;
1920 @@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
1921 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1922 dma_addr_t *dma_handle, gfp_t flag)
1923 {
1924 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1925 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1926 void *cpu_addr;
1927
1928 BUG_ON(!dma_ops);
1929 @@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
1930 static inline void dma_free_coherent(struct device *dev, size_t size,
1931 void *cpu_addr, dma_addr_t dma_handle)
1932 {
1933 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1934 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1935
1936 BUG_ON(!dma_ops);
1937
1938 @@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
1939
1940 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1941 {
1942 - struct dma_map_ops *dma_ops = get_dma_ops(dev);
1943 + const struct dma_map_ops *dma_ops = get_dma_ops(dev);
1944
1945 if (dma_ops->mapping_error)
1946 return dma_ops->mapping_error(dev, dma_addr);
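
Note: constifying dma_map_ops — the extern declarations, get_dma_ops()/set_dma_ops(), and every local pointer — is what lets the compiler place these function-pointer tables in read-only memory, so a kernel write primitive cannot redirect the callbacks. The pattern in miniature, with made-up names:

struct widget_ops_x {
	int  (*start)(void);
	void (*stop)(void);
};

static int  widget_start_x(void) { return 0; }
static void widget_stop_x(void)  { }

/* 'static const' lands the table in .rodata instead of writable .data. */
static const struct widget_ops_x widget_default_ops_x = {
	.start = widget_start_x,
	.stop  = widget_stop_x,
};

/* Consumers carry 'const struct widget_ops_x *' all the way through. */
static int widget_run_x(const struct widget_ops_x *ops)
{
	return ops->start ? ops->start() : -1;
}
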
1947 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/elf.h linux-2.6.32.42/arch/powerpc/include/asm/elf.h
1948 --- linux-2.6.32.42/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1949 +++ linux-2.6.32.42/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1950 @@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1951 the loader. We need to make sure that it is out of the way of the program
1952 that it will "exec", and that there is sufficient room for the brk. */
1953
1954 -extern unsigned long randomize_et_dyn(unsigned long base);
1955 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1956 +#define ELF_ET_DYN_BASE (0x20000000)
1957 +
1958 +#ifdef CONFIG_PAX_ASLR
1959 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1960 +
1961 +#ifdef __powerpc64__
1962 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
1963 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
1964 +#else
1965 +#define PAX_DELTA_MMAP_LEN 15
1966 +#define PAX_DELTA_STACK_LEN 15
1967 +#endif
1968 +#endif
1969
1970 /*
1971 * Our registers are always unsigned longs, whether we're a 32 bit
1972 @@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
1973 (0x7ff >> (PAGE_SHIFT - 12)) : \
1974 (0x3ffff >> (PAGE_SHIFT - 12)))
1975
1976 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1977 -#define arch_randomize_brk arch_randomize_brk
1978 -
1979 #endif /* __KERNEL__ */
1980
1981 /*
1982 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/iommu.h linux-2.6.32.42/arch/powerpc/include/asm/iommu.h
1983 --- linux-2.6.32.42/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
1984 +++ linux-2.6.32.42/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
1985 @@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
1986 extern void iommu_init_early_dart(void);
1987 extern void iommu_init_early_pasemi(void);
1988
1989 +/* dma-iommu.c */
1990 +extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
1991 +
1992 #ifdef CONFIG_PCI
1993 extern void pci_iommu_init(void);
1994 extern void pci_direct_iommu_init(void);
1995 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h
1996 --- linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
1997 +++ linux-2.6.32.42/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
1998 @@ -26,6 +26,7 @@ enum km_type {
1999 KM_SOFTIRQ1,
2000 KM_PPC_SYNC_PAGE,
2001 KM_PPC_SYNC_ICACHE,
2002 + KM_CLEARPAGE,
2003 KM_TYPE_NR
2004 };
2005
2006 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/page_64.h linux-2.6.32.42/arch/powerpc/include/asm/page_64.h
2007 --- linux-2.6.32.42/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2008 +++ linux-2.6.32.42/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2009 @@ -180,15 +180,18 @@ do { \
2010 * stack by default, so in the absense of a PT_GNU_STACK program header
2011 * we turn execute permission off.
2012 */
2013 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2014 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2015 +#define VM_STACK_DEFAULT_FLAGS32 \
2016 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2017 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2018
2019 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2020 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2021
2022 +#ifndef CONFIG_PAX_PAGEEXEC
2023 #define VM_STACK_DEFAULT_FLAGS \
2024 (test_thread_flag(TIF_32BIT) ? \
2025 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2026 +#endif
2027
2028 #include <asm-generic/getorder.h>
2029
2030 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/page.h linux-2.6.32.42/arch/powerpc/include/asm/page.h
2031 --- linux-2.6.32.42/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2032 +++ linux-2.6.32.42/arch/powerpc/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
2033 @@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2034 * and needs to be executable. This means the whole heap ends
2035 * up being executable.
2036 */
2037 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2038 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2039 +#define VM_DATA_DEFAULT_FLAGS32 \
2040 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2041 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2042
2043 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2044 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2045 @@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2046 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2047 #endif
2048
2049 +#define ktla_ktva(addr) (addr)
2050 +#define ktva_ktla(addr) (addr)
2051 +
2052 #ifndef __ASSEMBLY__
2053
2054 #undef STRICT_MM_TYPECHECKS
2055 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/pci.h linux-2.6.32.42/arch/powerpc/include/asm/pci.h
2056 --- linux-2.6.32.42/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2057 +++ linux-2.6.32.42/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2058 @@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2059 }
2060
2061 #ifdef CONFIG_PCI
2062 -extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2063 -extern struct dma_map_ops *get_pci_dma_ops(void);
2064 +extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2065 +extern const struct dma_map_ops *get_pci_dma_ops(void);
2066 #else /* CONFIG_PCI */
2067 #define set_pci_dma_ops(d)
2068 #define get_pci_dma_ops() NULL
2069 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h
2070 --- linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2071 +++ linux-2.6.32.42/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2072 @@ -2,6 +2,7 @@
2073 #define _ASM_POWERPC_PGTABLE_H
2074 #ifdef __KERNEL__
2075
2076 +#include <linux/const.h>
2077 #ifndef __ASSEMBLY__
2078 #include <asm/processor.h> /* For TASK_SIZE */
2079 #include <asm/mmu.h>
2080 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h
2081 --- linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2082 +++ linux-2.6.32.42/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2083 @@ -21,6 +21,7 @@
2084 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2085 #define _PAGE_USER 0x004 /* usermode access allowed */
2086 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2087 +#define _PAGE_EXEC _PAGE_GUARDED
2088 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2089 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2090 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2091 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/reg.h linux-2.6.32.42/arch/powerpc/include/asm/reg.h
2092 --- linux-2.6.32.42/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2093 +++ linux-2.6.32.42/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2094 @@ -191,6 +191,7 @@
2095 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2096 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2097 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2098 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2099 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2100 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2101 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2102 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h
2103 --- linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2104 +++ linux-2.6.32.42/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2105 @@ -13,7 +13,7 @@
2106
2107 #include <linux/swiotlb.h>
2108
2109 -extern struct dma_map_ops swiotlb_dma_ops;
2110 +extern const struct dma_map_ops swiotlb_dma_ops;
2111
2112 static inline void dma_mark_clean(void *addr, size_t size) {}
2113
2114 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/system.h linux-2.6.32.42/arch/powerpc/include/asm/system.h
2115 --- linux-2.6.32.42/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2116 +++ linux-2.6.32.42/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2117 @@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2118 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2119 #endif
2120
2121 -extern unsigned long arch_align_stack(unsigned long sp);
2122 +#define arch_align_stack(x) ((x) & ~0xfUL)
2123
2124 /* Used in very early kernel initialization. */
2125 extern unsigned long reloc_offset(void);
2126 diff -urNp linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h
2127 --- linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2128 +++ linux-2.6.32.42/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2129 @@ -13,6 +13,8 @@
2130 #define VERIFY_READ 0
2131 #define VERIFY_WRITE 1
2132
2133 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
2134 +
2135 /*
2136 * The fs value determines whether argument validity checking should be
2137 * performed or not. If get_fs() == USER_DS, checking is performed, with
2138 @@ -327,52 +329,6 @@ do { \
2139 extern unsigned long __copy_tofrom_user(void __user *to,
2140 const void __user *from, unsigned long size);
2141
2142 -#ifndef __powerpc64__
2143 -
2144 -static inline unsigned long copy_from_user(void *to,
2145 - const void __user *from, unsigned long n)
2146 -{
2147 - unsigned long over;
2148 -
2149 - if (access_ok(VERIFY_READ, from, n))
2150 - return __copy_tofrom_user((__force void __user *)to, from, n);
2151 - if ((unsigned long)from < TASK_SIZE) {
2152 - over = (unsigned long)from + n - TASK_SIZE;
2153 - return __copy_tofrom_user((__force void __user *)to, from,
2154 - n - over) + over;
2155 - }
2156 - return n;
2157 -}
2158 -
2159 -static inline unsigned long copy_to_user(void __user *to,
2160 - const void *from, unsigned long n)
2161 -{
2162 - unsigned long over;
2163 -
2164 - if (access_ok(VERIFY_WRITE, to, n))
2165 - return __copy_tofrom_user(to, (__force void __user *)from, n);
2166 - if ((unsigned long)to < TASK_SIZE) {
2167 - over = (unsigned long)to + n - TASK_SIZE;
2168 - return __copy_tofrom_user(to, (__force void __user *)from,
2169 - n - over) + over;
2170 - }
2171 - return n;
2172 -}
2173 -
2174 -#else /* __powerpc64__ */
2175 -
2176 -#define __copy_in_user(to, from, size) \
2177 - __copy_tofrom_user((to), (from), (size))
2178 -
2179 -extern unsigned long copy_from_user(void *to, const void __user *from,
2180 - unsigned long n);
2181 -extern unsigned long copy_to_user(void __user *to, const void *from,
2182 - unsigned long n);
2183 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
2184 - unsigned long n);
2185 -
2186 -#endif /* __powerpc64__ */
2187 -
2188 static inline unsigned long __copy_from_user_inatomic(void *to,
2189 const void __user *from, unsigned long n)
2190 {
2191 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2192 if (ret == 0)
2193 return 0;
2194 }
2195 +
2196 + if (!__builtin_constant_p(n))
2197 + check_object_size(to, n, false);
2198 +
2199 return __copy_tofrom_user((__force void __user *)to, from, n);
2200 }
2201
2202 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2203 if (ret == 0)
2204 return 0;
2205 }
2206 +
2207 + if (!__builtin_constant_p(n))
2208 + check_object_size(from, n, true);
2209 +
2210 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2211 }
2212
2213 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2214 return __copy_to_user_inatomic(to, from, size);
2215 }
2216
2217 +#ifndef __powerpc64__
2218 +
2219 +static inline unsigned long __must_check copy_from_user(void *to,
2220 + const void __user *from, unsigned long n)
2221 +{
2222 + unsigned long over;
2223 +
2224 + if ((long)n < 0)
2225 + return n;
2226 +
2227 + if (access_ok(VERIFY_READ, from, n)) {
2228 + if (!__builtin_constant_p(n))
2229 + check_object_size(to, n, false);
2230 + return __copy_tofrom_user((__force void __user *)to, from, n);
2231 + }
2232 + if ((unsigned long)from < TASK_SIZE) {
2233 + over = (unsigned long)from + n - TASK_SIZE;
2234 + if (!__builtin_constant_p(n - over))
2235 + check_object_size(to, n - over, false);
2236 + return __copy_tofrom_user((__force void __user *)to, from,
2237 + n - over) + over;
2238 + }
2239 + return n;
2240 +}
2241 +
2242 +static inline unsigned long __must_check copy_to_user(void __user *to,
2243 + const void *from, unsigned long n)
2244 +{
2245 + unsigned long over;
2246 +
2247 + if ((long)n < 0)
2248 + return n;
2249 +
2250 + if (access_ok(VERIFY_WRITE, to, n)) {
2251 + if (!__builtin_constant_p(n))
2252 + check_object_size(from, n, true);
2253 + return __copy_tofrom_user(to, (__force void __user *)from, n);
2254 + }
2255 + if ((unsigned long)to < TASK_SIZE) {
2256 + over = (unsigned long)to + n - TASK_SIZE;
2257 + if (!__builtin_constant_p(n))
2258 + check_object_size(from, n - over, true);
2259 + return __copy_tofrom_user(to, (__force void __user *)from,
2260 + n - over) + over;
2261 + }
2262 + return n;
2263 +}
2264 +
2265 +#else /* __powerpc64__ */
2266 +
2267 +#define __copy_in_user(to, from, size) \
2268 + __copy_tofrom_user((to), (from), (size))
2269 +
2270 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2271 +{
2272 + if ((long)n < 0 || n > INT_MAX)
2273 + return n;
2274 +
2275 + if (!__builtin_constant_p(n))
2276 + check_object_size(to, n, false);
2277 +
2278 + if (likely(access_ok(VERIFY_READ, from, n)))
2279 + n = __copy_from_user(to, from, n);
2280 + else
2281 + memset(to, 0, n);
2282 + return n;
2283 +}
2284 +
2285 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2286 +{
2287 + if ((long)n < 0 || n > INT_MAX)
2288 + return n;
2289 +
2290 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
2291 + if (!__builtin_constant_p(n))
2292 + check_object_size(from, n, true);
2293 + n = __copy_to_user(to, from, n);
2294 + }
2295 + return n;
2296 +}
2297 +
2298 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
2299 + unsigned long n);
2300 +
2301 +#endif /* __powerpc64__ */
2302 +
2303 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2304
2305 static inline unsigned long clear_user(void __user *addr, unsigned long size)
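
Note: the rewritten copy_from_user()/copy_to_user() wrappers above layer three checks on top of the raw copy: reject a size with the sign bit set, run check_object_size() on non-constant sizes so the source or destination object is bounds-checked, and (on the 64-bit read side) zero the buffer when access_ok() fails so stale kernel memory never leaks. A user-space sketch of that shape, with bounds_check() and range_ok() as hypothetical stand-ins for check_object_size() and access_ok():

#include <string.h>

static int  range_ok(const void *p, unsigned long n)       { (void)p; (void)n; return 1; }
static void bounds_check(const void *obj, unsigned long n) { (void)obj; (void)n; }

/* Returns bytes NOT copied, mirroring the kernel convention. */
static unsigned long hardened_copy_from(void *to, const void *from, unsigned long n)
{
	if ((long)n < 0)		/* negative-looking size: refuse */
		return n;

	bounds_check(to, n);		/* would verify 'to' really spans n bytes */

	if (range_ok(from, n)) {
		memcpy(to, from, n);
		return 0;
	}
	memset(to, 0, n);		/* don't hand back uninitialized memory */
	return n;
}
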
2306 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c
2307 --- linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2308 +++ linux-2.6.32.42/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2309 @@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2310 &cache_assoc_attr,
2311 };
2312
2313 -static struct sysfs_ops cache_index_ops = {
2314 +static const struct sysfs_ops cache_index_ops = {
2315 .show = cache_index_show,
2316 };
2317
2318 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/dma.c linux-2.6.32.42/arch/powerpc/kernel/dma.c
2319 --- linux-2.6.32.42/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2320 +++ linux-2.6.32.42/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2321 @@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2322 }
2323 #endif
2324
2325 -struct dma_map_ops dma_direct_ops = {
2326 +const struct dma_map_ops dma_direct_ops = {
2327 .alloc_coherent = dma_direct_alloc_coherent,
2328 .free_coherent = dma_direct_free_coherent,
2329 .map_sg = dma_direct_map_sg,
2330 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c
2331 --- linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2332 +++ linux-2.6.32.42/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2333 @@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2334 }
2335
2336 /* We support DMA to/from any memory page via the iommu */
2337 -static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2338 +int dma_iommu_dma_supported(struct device *dev, u64 mask)
2339 {
2340 struct iommu_table *tbl = get_iommu_table_base(dev);
2341
2342 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c
2343 --- linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2344 +++ linux-2.6.32.42/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2345 @@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2346 * map_page, and unmap_page on highmem, use normal dma_ops
2347 * for everything else.
2348 */
2349 -struct dma_map_ops swiotlb_dma_ops = {
2350 +const struct dma_map_ops swiotlb_dma_ops = {
2351 .alloc_coherent = dma_direct_alloc_coherent,
2352 .free_coherent = dma_direct_free_coherent,
2353 .map_sg = swiotlb_map_sg_attrs,
2354 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S
2355 --- linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2356 +++ linux-2.6.32.42/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2357 @@ -455,6 +455,7 @@ storage_fault_common:
2358 std r14,_DAR(r1)
2359 std r15,_DSISR(r1)
2360 addi r3,r1,STACK_FRAME_OVERHEAD
2361 + bl .save_nvgprs
2362 mr r4,r14
2363 mr r5,r15
2364 ld r14,PACA_EXGEN+EX_R14(r13)
2365 @@ -464,8 +465,7 @@ storage_fault_common:
2366 cmpdi r3,0
2367 bne- 1f
2368 b .ret_from_except_lite
2369 -1: bl .save_nvgprs
2370 - mr r5,r3
2371 +1: mr r5,r3
2372 addi r3,r1,STACK_FRAME_OVERHEAD
2373 ld r4,_DAR(r1)
2374 bl .bad_page_fault
2375 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S
2376 --- linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2377 +++ linux-2.6.32.42/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2378 @@ -818,10 +818,10 @@ handle_page_fault:
2379 11: ld r4,_DAR(r1)
2380 ld r5,_DSISR(r1)
2381 addi r3,r1,STACK_FRAME_OVERHEAD
2382 + bl .save_nvgprs
2383 bl .do_page_fault
2384 cmpdi r3,0
2385 beq+ 13f
2386 - bl .save_nvgprs
2387 mr r5,r3
2388 addi r3,r1,STACK_FRAME_OVERHEAD
2389 lwz r4,_DAR(r1)
2390 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c
2391 --- linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2392 +++ linux-2.6.32.42/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2393 @@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2394 return 1;
2395 }
2396
2397 -static struct dma_map_ops ibmebus_dma_ops = {
2398 +static const struct dma_map_ops ibmebus_dma_ops = {
2399 .alloc_coherent = ibmebus_alloc_coherent,
2400 .free_coherent = ibmebus_free_coherent,
2401 .map_sg = ibmebus_map_sg,
2402 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/kgdb.c linux-2.6.32.42/arch/powerpc/kernel/kgdb.c
2403 --- linux-2.6.32.42/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2404 +++ linux-2.6.32.42/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2405 @@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2406 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2407 return 0;
2408
2409 - if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2410 + if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2411 regs->nip += 4;
2412
2413 return 1;
2414 @@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2415 /*
2416 * Global data
2417 */
2418 -struct kgdb_arch arch_kgdb_ops = {
2419 +const struct kgdb_arch arch_kgdb_ops = {
2420 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2421 };
2422
2423 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/module_32.c linux-2.6.32.42/arch/powerpc/kernel/module_32.c
2424 --- linux-2.6.32.42/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2425 +++ linux-2.6.32.42/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2426 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2427 me->arch.core_plt_section = i;
2428 }
2429 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2430 - printk("Module doesn't contain .plt or .init.plt sections.\n");
2431 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2432 return -ENOEXEC;
2433 }
2434
2435 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2436
2437 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2438 /* Init, or core PLT? */
2439 - if (location >= mod->module_core
2440 - && location < mod->module_core + mod->core_size)
2441 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2442 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2443 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2444 - else
2445 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2446 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2447 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2448 + else {
2449 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2450 + return ~0UL;
2451 + }
2452
2453 /* Find this entry, or if that fails, the next avail. entry */
2454 while (entry->jump[0]) {
2455 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/module.c linux-2.6.32.42/arch/powerpc/kernel/module.c
2456 --- linux-2.6.32.42/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2457 +++ linux-2.6.32.42/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2458 @@ -31,11 +31,24 @@
2459
2460 LIST_HEAD(module_bug_list);
2461
2462 +#ifdef CONFIG_PAX_KERNEXEC
2463 void *module_alloc(unsigned long size)
2464 {
2465 if (size == 0)
2466 return NULL;
2467
2468 + return vmalloc(size);
2469 +}
2470 +
2471 +void *module_alloc_exec(unsigned long size)
2472 +#else
2473 +void *module_alloc(unsigned long size)
2474 +#endif
2475 +
2476 +{
2477 + if (size == 0)
2478 + return NULL;
2479 +
2480 return vmalloc_exec(size);
2481 }
2482
2483 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2484 vfree(module_region);
2485 }
2486
2487 +#ifdef CONFIG_PAX_KERNEXEC
2488 +void module_free_exec(struct module *mod, void *module_region)
2489 +{
2490 + module_free(mod, module_region);
2491 +}
2492 +#endif
2493 +
2494 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2495 const Elf_Shdr *sechdrs,
2496 const char *name)
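
Note: under KERNEXEC the module loader above gains two allocators: module_alloc() hands out ordinary (non-executable) vmalloc memory for module data, while module_alloc_exec() supplies the executable mappings for module text, so writable and executable module memory never coincide. The split in miniature; plain_alloc()/exec_alloc() are placeholders for vmalloc()/vmalloc_exec():

#include <stdlib.h>

static void *plain_alloc(unsigned long size) { return malloc(size); }	/* data: never executable */
static void *exec_alloc(unsigned long size)  { return malloc(size); }	/* text: would be mapped executable */

static void *mod_alloc_data(unsigned long size)
{
	return size ? plain_alloc(size) : NULL;
}

static void *mod_alloc_text(unsigned long size)
{
	return size ? exec_alloc(size) : NULL;
}
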
2497 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/pci-common.c linux-2.6.32.42/arch/powerpc/kernel/pci-common.c
2498 --- linux-2.6.32.42/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2499 +++ linux-2.6.32.42/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2500 @@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2501 unsigned int ppc_pci_flags = 0;
2502
2503
2504 -static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2505 +static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2506
2507 -void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2508 +void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2509 {
2510 pci_dma_ops = dma_ops;
2511 }
2512
2513 -struct dma_map_ops *get_pci_dma_ops(void)
2514 +const struct dma_map_ops *get_pci_dma_ops(void)
2515 {
2516 return pci_dma_ops;
2517 }
2518 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/process.c linux-2.6.32.42/arch/powerpc/kernel/process.c
2519 --- linux-2.6.32.42/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2520 +++ linux-2.6.32.42/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2521 @@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2522 * Lookup NIP late so we have the best change of getting the
2523 * above info out without failing
2524 */
2525 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2526 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2527 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2528 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2529 #endif
2530 show_stack(current, (unsigned long *) regs->gpr[1]);
2531 if (!user_mode(regs))
2532 @@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2533 newsp = stack[0];
2534 ip = stack[STACK_FRAME_LR_SAVE];
2535 if (!firstframe || ip != lr) {
2536 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2537 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2538 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2539 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2540 - printk(" (%pS)",
2541 + printk(" (%pA)",
2542 (void *)current->ret_stack[curr_frame].ret);
2543 curr_frame--;
2544 }
2545 @@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2546 struct pt_regs *regs = (struct pt_regs *)
2547 (sp + STACK_FRAME_OVERHEAD);
2548 lr = regs->link;
2549 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2550 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2551 regs->trap, (void *)regs->nip, (void *)lr);
2552 firstframe = 1;
2553 }
2554 @@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2555 }
2556
2557 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2558 -
2559 -unsigned long arch_align_stack(unsigned long sp)
2560 -{
2561 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2562 - sp -= get_random_int() & ~PAGE_MASK;
2563 - return sp & ~0xf;
2564 -}
2565 -
2566 -static inline unsigned long brk_rnd(void)
2567 -{
2568 - unsigned long rnd = 0;
2569 -
2570 - /* 8MB for 32bit, 1GB for 64bit */
2571 - if (is_32bit_task())
2572 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2573 - else
2574 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2575 -
2576 - return rnd << PAGE_SHIFT;
2577 -}
2578 -
2579 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2580 -{
2581 - unsigned long base = mm->brk;
2582 - unsigned long ret;
2583 -
2584 -#ifdef CONFIG_PPC_STD_MMU_64
2585 - /*
2586 - * If we are using 1TB segments and we are allowed to randomise
2587 - * the heap, we can put it above 1TB so it is backed by a 1TB
2588 - * segment. Otherwise the heap will be in the bottom 1TB
2589 - * which always uses 256MB segments and this may result in a
2590 - * performance penalty.
2591 - */
2592 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2593 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2594 -#endif
2595 -
2596 - ret = PAGE_ALIGN(base + brk_rnd());
2597 -
2598 - if (ret < mm->brk)
2599 - return mm->brk;
2600 -
2601 - return ret;
2602 -}
2603 -
2604 -unsigned long randomize_et_dyn(unsigned long base)
2605 -{
2606 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2607 -
2608 - if (ret < base)
2609 - return base;
2610 -
2611 - return ret;
2612 -}
2613 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/signal_32.c linux-2.6.32.42/arch/powerpc/kernel/signal_32.c
2614 --- linux-2.6.32.42/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2615 +++ linux-2.6.32.42/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2616 @@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2617 /* Save user registers on the stack */
2618 frame = &rt_sf->uc.uc_mcontext;
2619 addr = frame;
2620 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2621 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2622 if (save_user_regs(regs, frame, 0, 1))
2623 goto badframe;
2624 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2625 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/signal_64.c linux-2.6.32.42/arch/powerpc/kernel/signal_64.c
2626 --- linux-2.6.32.42/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2627 +++ linux-2.6.32.42/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2628 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2629 current->thread.fpscr.val = 0;
2630
2631 /* Set up to return from userspace. */
2632 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2633 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2634 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2635 } else {
2636 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2637 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c
2638 --- linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2639 +++ linux-2.6.32.42/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2640 @@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2641 if (oldlenp) {
2642 if (!error) {
2643 if (get_user(oldlen, oldlenp) ||
2644 - put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2645 + put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2646 + copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2647 error = -EFAULT;
2648 }
2649 - copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2650 }
2651 return error;
2652 }
2653 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/traps.c linux-2.6.32.42/arch/powerpc/kernel/traps.c
2654 --- linux-2.6.32.42/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2655 +++ linux-2.6.32.42/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2656 @@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2657 static inline void pmac_backlight_unblank(void) { }
2658 #endif
2659
2660 +extern void gr_handle_kernel_exploit(void);
2661 +
2662 int die(const char *str, struct pt_regs *regs, long err)
2663 {
2664 static struct {
2665 @@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2666 if (panic_on_oops)
2667 panic("Fatal exception");
2668
2669 + gr_handle_kernel_exploit();
2670 +
2671 oops_exit();
2672 do_exit(err);
2673
2674 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/vdso.c linux-2.6.32.42/arch/powerpc/kernel/vdso.c
2675 --- linux-2.6.32.42/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2676 +++ linux-2.6.32.42/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2677 @@ -36,6 +36,7 @@
2678 #include <asm/firmware.h>
2679 #include <asm/vdso.h>
2680 #include <asm/vdso_datapage.h>
2681 +#include <asm/mman.h>
2682
2683 #include "setup.h"
2684
2685 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2686 vdso_base = VDSO32_MBASE;
2687 #endif
2688
2689 - current->mm->context.vdso_base = 0;
2690 + current->mm->context.vdso_base = ~0UL;
2691
2692 /* vDSO has a problem and was disabled, just don't "enable" it for the
2693 * process
2694 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2695 vdso_base = get_unmapped_area(NULL, vdso_base,
2696 (vdso_pages << PAGE_SHIFT) +
2697 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2698 - 0, 0);
2699 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2700 if (IS_ERR_VALUE(vdso_base)) {
2701 rc = vdso_base;
2702 goto fail_mmapsem;
2703 diff -urNp linux-2.6.32.42/arch/powerpc/kernel/vio.c linux-2.6.32.42/arch/powerpc/kernel/vio.c
2704 --- linux-2.6.32.42/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2705 +++ linux-2.6.32.42/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2706 @@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2707 vio_cmo_dealloc(viodev, alloc_size);
2708 }
2709
2710 -struct dma_map_ops vio_dma_mapping_ops = {
2711 +static const struct dma_map_ops vio_dma_mapping_ops = {
2712 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2713 .free_coherent = vio_dma_iommu_free_coherent,
2714 .map_sg = vio_dma_iommu_map_sg,
2715 .unmap_sg = vio_dma_iommu_unmap_sg,
2716 + .dma_supported = dma_iommu_dma_supported,
2717 .map_page = vio_dma_iommu_map_page,
2718 .unmap_page = vio_dma_iommu_unmap_page,
2719
2720 @@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2721
2722 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2723 {
2724 - vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2725 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2726 }
2727
2728 diff -urNp linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c
2729 --- linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2730 +++ linux-2.6.32.42/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2731 @@ -9,22 +9,6 @@
2732 #include <linux/module.h>
2733 #include <asm/uaccess.h>
2734
2735 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2736 -{
2737 - if (likely(access_ok(VERIFY_READ, from, n)))
2738 - n = __copy_from_user(to, from, n);
2739 - else
2740 - memset(to, 0, n);
2741 - return n;
2742 -}
2743 -
2744 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2745 -{
2746 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2747 - n = __copy_to_user(to, from, n);
2748 - return n;
2749 -}
2750 -
2751 unsigned long copy_in_user(void __user *to, const void __user *from,
2752 unsigned long n)
2753 {
2754 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2755 return n;
2756 }
2757
2758 -EXPORT_SYMBOL(copy_from_user);
2759 -EXPORT_SYMBOL(copy_to_user);
2760 EXPORT_SYMBOL(copy_in_user);
2761
2762 diff -urNp linux-2.6.32.42/arch/powerpc/mm/fault.c linux-2.6.32.42/arch/powerpc/mm/fault.c
2763 --- linux-2.6.32.42/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2764 +++ linux-2.6.32.42/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2765 @@ -30,6 +30,10 @@
2766 #include <linux/kprobes.h>
2767 #include <linux/kdebug.h>
2768 #include <linux/perf_event.h>
2769 +#include <linux/slab.h>
2770 +#include <linux/pagemap.h>
2771 +#include <linux/compiler.h>
2772 +#include <linux/unistd.h>
2773
2774 #include <asm/firmware.h>
2775 #include <asm/page.h>
2776 @@ -40,6 +44,7 @@
2777 #include <asm/uaccess.h>
2778 #include <asm/tlbflush.h>
2779 #include <asm/siginfo.h>
2780 +#include <asm/ptrace.h>
2781
2782
2783 #ifdef CONFIG_KPROBES
2784 @@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2785 }
2786 #endif
2787
2788 +#ifdef CONFIG_PAX_PAGEEXEC
2789 +/*
2790 + * PaX: decide what to do with offenders (regs->nip = fault address)
2791 + *
2792 + * returns 1 when task should be killed
2793 + */
2794 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2795 +{
2796 + return 1;
2797 +}
2798 +
2799 +void pax_report_insns(void *pc, void *sp)
2800 +{
2801 + unsigned long i;
2802 +
2803 + printk(KERN_ERR "PAX: bytes at PC: ");
2804 + for (i = 0; i < 5; i++) {
2805 + unsigned int c;
2806 + if (get_user(c, (unsigned int __user *)pc+i))
2807 + printk(KERN_CONT "???????? ");
2808 + else
2809 + printk(KERN_CONT "%08x ", c);
2810 + }
2811 + printk("\n");
2812 +}
2813 +#endif
2814 +
2815 /*
2816 * Check whether the instruction at regs->nip is a store using
2817 * an update addressing form which will update r1.
2818 @@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2819 * indicate errors in DSISR but can validly be set in SRR1.
2820 */
2821 if (trap == 0x400)
2822 - error_code &= 0x48200000;
2823 + error_code &= 0x58200000;
2824 else
2825 is_write = error_code & DSISR_ISSTORE;
2826 #else
2827 @@ -250,7 +282,7 @@ good_area:
2828 * "undefined". Of those that can be set, this is the only
2829 * one which seems bad.
2830 */
2831 - if (error_code & 0x10000000)
2832 + if (error_code & DSISR_GUARDED)
2833 /* Guarded storage error. */
2834 goto bad_area;
2835 #endif /* CONFIG_8xx */
2836 @@ -265,7 +297,7 @@ good_area:
2837 * processors use the same I/D cache coherency mechanism
2838 * as embedded.
2839 */
2840 - if (error_code & DSISR_PROTFAULT)
2841 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2842 goto bad_area;
2843 #endif /* CONFIG_PPC_STD_MMU */
2844
2845 @@ -335,6 +367,23 @@ bad_area:
2846 bad_area_nosemaphore:
2847 /* User mode accesses cause a SIGSEGV */
2848 if (user_mode(regs)) {
2849 +
2850 +#ifdef CONFIG_PAX_PAGEEXEC
2851 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2852 +#ifdef CONFIG_PPC_STD_MMU
2853 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2854 +#else
2855 + if (is_exec && regs->nip == address) {
2856 +#endif
2857 + switch (pax_handle_fetch_fault(regs)) {
2858 + }
2859 +
2860 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2861 + do_group_exit(SIGKILL);
2862 + }
2863 + }
2864 +#endif
2865 +
2866 _exception(SIGSEGV, regs, code, address);
2867 return 0;
2868 }
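/*
 * Illustrative sketch, not from the patch: pax_report_insns() above dumps the
 * five 32-bit words starting at the faulting PC, printing "????????" for any
 * word it cannot read.  A plain user-space analogue of the same loop, with
 * memcpy() in place of get_user():
 */
#include <stdio.h>
#include <string.h>

static void dump_insns(const void *pc)
{
	unsigned long i;

	printf("bytes at PC: ");
	for (i = 0; i < 5; i++) {
		unsigned int c;

		memcpy(&c, (const unsigned int *)pc + i, sizeof(c));
		printf("%08x ", c);
	}
	printf("\n");
}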
2869 diff -urNp linux-2.6.32.42/arch/powerpc/mm/mmap_64.c linux-2.6.32.42/arch/powerpc/mm/mmap_64.c
2870 --- linux-2.6.32.42/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
2871 +++ linux-2.6.32.42/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
2872 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2873 */
2874 if (mmap_is_legacy()) {
2875 mm->mmap_base = TASK_UNMAPPED_BASE;
2876 +
2877 +#ifdef CONFIG_PAX_RANDMMAP
2878 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2879 + mm->mmap_base += mm->delta_mmap;
2880 +#endif
2881 +
2882 mm->get_unmapped_area = arch_get_unmapped_area;
2883 mm->unmap_area = arch_unmap_area;
2884 } else {
2885 mm->mmap_base = mmap_base();
2886 +
2887 +#ifdef CONFIG_PAX_RANDMMAP
2888 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2889 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2890 +#endif
2891 +
2892 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2893 mm->unmap_area = arch_unmap_area_topdown;
2894 }
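/*
 * Illustrative sketch of the CONFIG_PAX_RANDMMAP adjustment above (not part
 * of the patch): the legacy bottom-up layout slides the mmap base upwards by
 * a per-process random delta, while the top-down layout slides it downwards
 * by the mmap and stack deltas combined.  pick_delta() is an assumption about
 * how the page-granular deltas are derived elsewhere in the patch.
 */
static unsigned long pick_delta(unsigned long random_bits, unsigned int len_bits,
				unsigned int page_shift)
{
	/* keep len_bits of entropy, expressed in whole pages */
	return (random_bits & ((1UL << len_bits) - 1)) << page_shift;
}

static unsigned long randomized_mmap_base(unsigned long base, unsigned long delta_mmap,
					  unsigned long delta_stack, int topdown)
{
	if (topdown)
		return base - (delta_mmap + delta_stack);	/* shrink the gap under the stack */
	return base + delta_mmap;				/* push the legacy base upwards */
}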
2895 diff -urNp linux-2.6.32.42/arch/powerpc/mm/slice.c linux-2.6.32.42/arch/powerpc/mm/slice.c
2896 --- linux-2.6.32.42/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
2897 +++ linux-2.6.32.42/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
2898 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2899 if ((mm->task_size - len) < addr)
2900 return 0;
2901 vma = find_vma(mm, addr);
2902 - return (!vma || (addr + len) <= vma->vm_start);
2903 + return check_heap_stack_gap(vma, addr, len);
2904 }
2905
2906 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2907 @@ -256,7 +256,7 @@ full_search:
2908 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2909 continue;
2910 }
2911 - if (!vma || addr + len <= vma->vm_start) {
2912 + if (check_heap_stack_gap(vma, addr, len)) {
2913 /*
2914 * Remember the place where we stopped the search:
2915 */
2916 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2917 }
2918 }
2919
2920 - addr = mm->mmap_base;
2921 - while (addr > len) {
2922 + if (mm->mmap_base < len)
2923 + addr = -ENOMEM;
2924 + else
2925 + addr = mm->mmap_base - len;
2926 +
2927 + while (!IS_ERR_VALUE(addr)) {
2928 /* Go down by chunk size */
2929 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2930 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2931
2932 /* Check for hit with different page size */
2933 mask = slice_range_to_mask(addr, len);
2934 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2935 * return with success:
2936 */
2937 vma = find_vma(mm, addr);
2938 - if (!vma || (addr + len) <= vma->vm_start) {
2939 + if (check_heap_stack_gap(vma, addr, len)) {
2940 /* remember the address as a hint for next time */
2941 if (use_cache)
2942 mm->free_area_cache = addr;
2943 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2944 mm->cached_hole_size = vma->vm_start - addr;
2945
2946 /* try just below the current vma->vm_start */
2947 - addr = vma->vm_start;
2948 + addr = skip_heap_stack_gap(vma, len);
2949 }
2950
2951 /*
2952 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2953 if (fixed && addr > (mm->task_size - len))
2954 return -EINVAL;
2955
2956 +#ifdef CONFIG_PAX_RANDMMAP
2957 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2958 + addr = 0;
2959 +#endif
2960 +
2961 /* If hint, make sure it matches our alignment restrictions */
2962 if (!fixed && addr) {
2963 addr = _ALIGN_UP(addr, 1ul << pshift);
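/*
 * Illustrative sketch, not from the patch, of the check_heap_stack_gap() test
 * that the hunks above substitute for the open-coded
 * "!vma || addr + len <= vma->vm_start" comparison: besides requiring the
 * candidate range to end below the next mapping, it keeps a guard gap in
 * front of a stack that grows down.  Hedged stand-alone version with the VMA
 * reduced to its start address and a grows-down flag; guard_len is an assumed
 * tunable:
 */
static int gap_ok(int have_vma, unsigned long vma_start, int vma_grows_down,
		  unsigned long addr, unsigned long len, unsigned long guard_len)
{
	if (!have_vma)
		return 1;					/* nothing mapped above the range */
	if (vma_grows_down && guard_len)
		return addr + len + guard_len <= vma_start;	/* keep a hole below the stack */
	return addr + len <= vma_start;
}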
2964 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c
2965 --- linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
2966 +++ linux-2.6.32.42/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
2967 @@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
2968 lite5200_pm_target_state = PM_SUSPEND_ON;
2969 }
2970
2971 -static struct platform_suspend_ops lite5200_pm_ops = {
2972 +static const struct platform_suspend_ops lite5200_pm_ops = {
2973 .valid = lite5200_pm_valid,
2974 .begin = lite5200_pm_begin,
2975 .prepare = lite5200_pm_prepare,
2976 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c
2977 --- linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
2978 +++ linux-2.6.32.42/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
2979 @@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
2980 iounmap(mbar);
2981 }
2982
2983 -static struct platform_suspend_ops mpc52xx_pm_ops = {
2984 +static const struct platform_suspend_ops mpc52xx_pm_ops = {
2985 .valid = mpc52xx_pm_valid,
2986 .prepare = mpc52xx_pm_prepare,
2987 .enter = mpc52xx_pm_enter,
2988 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c
2989 --- linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
2990 +++ linux-2.6.32.42/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
2991 @@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
2992 return ret;
2993 }
2994
2995 -static struct platform_suspend_ops mpc83xx_suspend_ops = {
2996 +static const struct platform_suspend_ops mpc83xx_suspend_ops = {
2997 .valid = mpc83xx_suspend_valid,
2998 .begin = mpc83xx_suspend_begin,
2999 .enter = mpc83xx_suspend_enter,
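/*
 * Illustrative sketch of the constification applied to the ops tables in the
 * hunks above and below (platform_suspend_ops, dma_map_ops, sysfs_ops, ...):
 * once a structure of function pointers is declared const, it is placed in
 * .rodata and the pointers can no longer be overwritten at run time.
 * Hypothetical minimal example, not from the patch:
 */
struct example_ops {
	int  (*prepare)(void);
	void (*finish)(void);
};

static int  ex_prepare(void) { return 0; }
static void ex_finish(void)  { }

static const struct example_ops example_ops = {	/* ends up in read-only data */
	.prepare = ex_prepare,
	.finish  = ex_finish,
};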
3000 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c
3001 --- linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3002 +++ linux-2.6.32.42/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3003 @@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3004
3005 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3006
3007 -struct dma_map_ops dma_iommu_fixed_ops = {
3008 +const struct dma_map_ops dma_iommu_fixed_ops = {
3009 .alloc_coherent = dma_fixed_alloc_coherent,
3010 .free_coherent = dma_fixed_free_coherent,
3011 .map_sg = dma_fixed_map_sg,
3012 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c
3013 --- linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3014 +++ linux-2.6.32.42/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3015 @@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3016 return mask >= DMA_BIT_MASK(32);
3017 }
3018
3019 -static struct dma_map_ops ps3_sb_dma_ops = {
3020 +static const struct dma_map_ops ps3_sb_dma_ops = {
3021 .alloc_coherent = ps3_alloc_coherent,
3022 .free_coherent = ps3_free_coherent,
3023 .map_sg = ps3_sb_map_sg,
3024 @@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3025 .unmap_page = ps3_unmap_page,
3026 };
3027
3028 -static struct dma_map_ops ps3_ioc0_dma_ops = {
3029 +static const struct dma_map_ops ps3_ioc0_dma_ops = {
3030 .alloc_coherent = ps3_alloc_coherent,
3031 .free_coherent = ps3_free_coherent,
3032 .map_sg = ps3_ioc0_map_sg,
3033 diff -urNp linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig
3034 --- linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3035 +++ linux-2.6.32.42/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3036 @@ -2,6 +2,8 @@ config PPC_PSERIES
3037 depends on PPC64 && PPC_BOOK3S
3038 bool "IBM pSeries & new (POWER5-based) iSeries"
3039 select MPIC
3040 + select PCI_MSI
3041 + select XICS
3042 select PPC_I8259
3043 select PPC_RTAS
3044 select RTAS_ERROR_LOGGING
3045 diff -urNp linux-2.6.32.42/arch/s390/include/asm/elf.h linux-2.6.32.42/arch/s390/include/asm/elf.h
3046 --- linux-2.6.32.42/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3047 +++ linux-2.6.32.42/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3048 @@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3049 that it will "exec", and that there is sufficient room for the brk. */
3050 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3051
3052 +#ifdef CONFIG_PAX_ASLR
3053 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3054 +
3055 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3056 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3057 +#endif
3058 +
3059 /* This yields a mask that user programs can use to figure out what
3060 instruction set this CPU supports. */
3061
3062 diff -urNp linux-2.6.32.42/arch/s390/include/asm/setup.h linux-2.6.32.42/arch/s390/include/asm/setup.h
3063 --- linux-2.6.32.42/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3064 +++ linux-2.6.32.42/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3065 @@ -50,13 +50,13 @@ extern unsigned long memory_end;
3066 void detect_memory_layout(struct mem_chunk chunk[]);
3067
3068 #ifdef CONFIG_S390_SWITCH_AMODE
3069 -extern unsigned int switch_amode;
3070 +#define switch_amode (1)
3071 #else
3072 #define switch_amode (0)
3073 #endif
3074
3075 #ifdef CONFIG_S390_EXEC_PROTECT
3076 -extern unsigned int s390_noexec;
3077 +#define s390_noexec (1)
3078 #else
3079 #define s390_noexec (0)
3080 #endif
3081 diff -urNp linux-2.6.32.42/arch/s390/include/asm/uaccess.h linux-2.6.32.42/arch/s390/include/asm/uaccess.h
3082 --- linux-2.6.32.42/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3083 +++ linux-2.6.32.42/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3084 @@ -232,6 +232,10 @@ static inline unsigned long __must_check
3085 copy_to_user(void __user *to, const void *from, unsigned long n)
3086 {
3087 might_fault();
3088 +
3089 + if ((long)n < 0)
3090 + return n;
3091 +
3092 if (access_ok(VERIFY_WRITE, to, n))
3093 n = __copy_to_user(to, from, n);
3094 return n;
3095 @@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3096 static inline unsigned long __must_check
3097 __copy_from_user(void *to, const void __user *from, unsigned long n)
3098 {
3099 + if ((long)n < 0)
3100 + return n;
3101 +
3102 if (__builtin_constant_p(n) && (n <= 256))
3103 return uaccess.copy_from_user_small(n, from, to);
3104 else
3105 @@ -283,6 +290,10 @@ static inline unsigned long __must_check
3106 copy_from_user(void *to, const void __user *from, unsigned long n)
3107 {
3108 might_fault();
3109 +
3110 + if ((long)n < 0)
3111 + return n;
3112 +
3113 if (access_ok(VERIFY_READ, from, n))
3114 n = __copy_from_user(to, from, n);
3115 else
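/*
 * Illustrative sketch of the "(long)n < 0" guard added above, not from the
 * patch: an unsigned length that underflowed (e.g. len - hdr_size with
 * hdr_size > len) shows up as a huge value with the top bit set, so
 * reinterpreting it as signed and rejecting negatives stops the copy before
 * it can sweep through the address space.  Stand-alone form of the check:
 */
static unsigned long copy_len_sane(unsigned long n)
{
	if ((long)n < 0)	/* top bit set: almost certainly an underflowed length */
		return 0;	/* caller treats 0 as "nothing safe to copy" */
	return n;
}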
3116 diff -urNp linux-2.6.32.42/arch/s390/Kconfig linux-2.6.32.42/arch/s390/Kconfig
3117 --- linux-2.6.32.42/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3118 +++ linux-2.6.32.42/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3119 @@ -194,28 +194,26 @@ config AUDIT_ARCH
3120
3121 config S390_SWITCH_AMODE
3122 bool "Switch kernel/user addressing modes"
3123 + default y
3124 help
3125 This option allows to switch the addressing modes of kernel and user
3126 - space. The kernel parameter switch_amode=on will enable this feature,
3127 - default is disabled. Enabling this (via kernel parameter) on machines
3128 - earlier than IBM System z9-109 EC/BC will reduce system performance.
3129 + space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3130 + will reduce system performance.
3131
3132 Note that this option will also be selected by selecting the execute
3133 - protection option below. Enabling the execute protection via the
3134 - noexec kernel parameter will also switch the addressing modes,
3135 - independent of the switch_amode kernel parameter.
3136 + protection option below. Enabling the execute protection will also
3137 + switch the addressing modes, independent of this option.
3138
3139
3140 config S390_EXEC_PROTECT
3141 bool "Data execute protection"
3142 + default y
3143 select S390_SWITCH_AMODE
3144 help
3145 This option allows to enable a buffer overflow protection for user
3146 space programs and it also selects the addressing mode option above.
3147 - The kernel parameter noexec=on will enable this feature and also
3148 - switch the addressing modes, default is disabled. Enabling this (via
3149 - kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3150 - will reduce system performance.
3151 + Enabling this on machines earlier than IBM System z9-109 EC/BC will
3152 + reduce system performance.
3153
3154 comment "Code generation options"
3155
3156 diff -urNp linux-2.6.32.42/arch/s390/kernel/module.c linux-2.6.32.42/arch/s390/kernel/module.c
3157 --- linux-2.6.32.42/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3158 +++ linux-2.6.32.42/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3159 @@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3160
3161 /* Increase core size by size of got & plt and set start
3162 offsets for got and plt. */
3163 - me->core_size = ALIGN(me->core_size, 4);
3164 - me->arch.got_offset = me->core_size;
3165 - me->core_size += me->arch.got_size;
3166 - me->arch.plt_offset = me->core_size;
3167 - me->core_size += me->arch.plt_size;
3168 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
3169 + me->arch.got_offset = me->core_size_rw;
3170 + me->core_size_rw += me->arch.got_size;
3171 + me->arch.plt_offset = me->core_size_rx;
3172 + me->core_size_rx += me->arch.plt_size;
3173 return 0;
3174 }
3175
3176 @@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3177 if (info->got_initialized == 0) {
3178 Elf_Addr *gotent;
3179
3180 - gotent = me->module_core + me->arch.got_offset +
3181 + gotent = me->module_core_rw + me->arch.got_offset +
3182 info->got_offset;
3183 *gotent = val;
3184 info->got_initialized = 1;
3185 @@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3186 else if (r_type == R_390_GOTENT ||
3187 r_type == R_390_GOTPLTENT)
3188 *(unsigned int *) loc =
3189 - (val + (Elf_Addr) me->module_core - loc) >> 1;
3190 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3191 else if (r_type == R_390_GOT64 ||
3192 r_type == R_390_GOTPLT64)
3193 *(unsigned long *) loc = val;
3194 @@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3195 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3196 if (info->plt_initialized == 0) {
3197 unsigned int *ip;
3198 - ip = me->module_core + me->arch.plt_offset +
3199 + ip = me->module_core_rx + me->arch.plt_offset +
3200 info->plt_offset;
3201 #ifndef CONFIG_64BIT
3202 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3203 @@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3204 val - loc + 0xffffUL < 0x1ffffeUL) ||
3205 (r_type == R_390_PLT32DBL &&
3206 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3207 - val = (Elf_Addr) me->module_core +
3208 + val = (Elf_Addr) me->module_core_rx +
3209 me->arch.plt_offset +
3210 info->plt_offset;
3211 val += rela->r_addend - loc;
3212 @@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3213 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3214 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3215 val = val + rela->r_addend -
3216 - ((Elf_Addr) me->module_core + me->arch.got_offset);
3217 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3218 if (r_type == R_390_GOTOFF16)
3219 *(unsigned short *) loc = val;
3220 else if (r_type == R_390_GOTOFF32)
3221 @@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3222 break;
3223 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3224 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3225 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
3226 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3227 rela->r_addend - loc;
3228 if (r_type == R_390_GOTPC)
3229 *(unsigned int *) loc = val;
3230 diff -urNp linux-2.6.32.42/arch/s390/kernel/setup.c linux-2.6.32.42/arch/s390/kernel/setup.c
3231 --- linux-2.6.32.42/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3232 +++ linux-2.6.32.42/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3233 @@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3234 early_param("mem", early_parse_mem);
3235
3236 #ifdef CONFIG_S390_SWITCH_AMODE
3237 -unsigned int switch_amode = 0;
3238 -EXPORT_SYMBOL_GPL(switch_amode);
3239 -
3240 static int set_amode_and_uaccess(unsigned long user_amode,
3241 unsigned long user32_amode)
3242 {
3243 @@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3244 return 0;
3245 }
3246 }
3247 -
3248 -/*
3249 - * Switch kernel/user addressing modes?
3250 - */
3251 -static int __init early_parse_switch_amode(char *p)
3252 -{
3253 - switch_amode = 1;
3254 - return 0;
3255 -}
3256 -early_param("switch_amode", early_parse_switch_amode);
3257 -
3258 #else /* CONFIG_S390_SWITCH_AMODE */
3259 static inline int set_amode_and_uaccess(unsigned long user_amode,
3260 unsigned long user32_amode)
3261 @@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3262 }
3263 #endif /* CONFIG_S390_SWITCH_AMODE */
3264
3265 -#ifdef CONFIG_S390_EXEC_PROTECT
3266 -unsigned int s390_noexec = 0;
3267 -EXPORT_SYMBOL_GPL(s390_noexec);
3268 -
3269 -/*
3270 - * Enable execute protection?
3271 - */
3272 -static int __init early_parse_noexec(char *p)
3273 -{
3274 - if (!strncmp(p, "off", 3))
3275 - return 0;
3276 - switch_amode = 1;
3277 - s390_noexec = 1;
3278 - return 0;
3279 -}
3280 -early_param("noexec", early_parse_noexec);
3281 -#endif /* CONFIG_S390_EXEC_PROTECT */
3282 -
3283 static void setup_addressing_mode(void)
3284 {
3285 if (s390_noexec) {
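/*
 * Illustrative sketch, not from the patch, of the change above and in
 * setup.h: the runtime toggles (switch_amode, s390_noexec) previously set via
 * early_param() become compile-time constants, so the protections can no
 * longer be switched off from the kernel command line and the compiler folds
 * away the dead branches.  CONFIG_FEATURE_ALWAYS_ON and feature_setup() are
 * hypothetical names used only to show the shape:
 */
#ifdef CONFIG_FEATURE_ALWAYS_ON
# define feature_enabled (1)		/* constant: branches resolved at build time */
#else
# define feature_enabled (0)
#endif

static int feature_setup(void)
{
	if (!feature_enabled)
		return 0;		/* compiled out when the constant is 0 */
	/* ... enable the feature ... */
	return 1;
}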
3286 diff -urNp linux-2.6.32.42/arch/s390/mm/mmap.c linux-2.6.32.42/arch/s390/mm/mmap.c
3287 --- linux-2.6.32.42/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3288 +++ linux-2.6.32.42/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3289 @@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3290 */
3291 if (mmap_is_legacy()) {
3292 mm->mmap_base = TASK_UNMAPPED_BASE;
3293 +
3294 +#ifdef CONFIG_PAX_RANDMMAP
3295 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3296 + mm->mmap_base += mm->delta_mmap;
3297 +#endif
3298 +
3299 mm->get_unmapped_area = arch_get_unmapped_area;
3300 mm->unmap_area = arch_unmap_area;
3301 } else {
3302 mm->mmap_base = mmap_base();
3303 +
3304 +#ifdef CONFIG_PAX_RANDMMAP
3305 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3306 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3307 +#endif
3308 +
3309 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3310 mm->unmap_area = arch_unmap_area_topdown;
3311 }
3312 @@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3313 */
3314 if (mmap_is_legacy()) {
3315 mm->mmap_base = TASK_UNMAPPED_BASE;
3316 +
3317 +#ifdef CONFIG_PAX_RANDMMAP
3318 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3319 + mm->mmap_base += mm->delta_mmap;
3320 +#endif
3321 +
3322 mm->get_unmapped_area = s390_get_unmapped_area;
3323 mm->unmap_area = arch_unmap_area;
3324 } else {
3325 mm->mmap_base = mmap_base();
3326 +
3327 +#ifdef CONFIG_PAX_RANDMMAP
3328 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3329 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3330 +#endif
3331 +
3332 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3333 mm->unmap_area = arch_unmap_area_topdown;
3334 }
3335 diff -urNp linux-2.6.32.42/arch/score/include/asm/system.h linux-2.6.32.42/arch/score/include/asm/system.h
3336 --- linux-2.6.32.42/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3337 +++ linux-2.6.32.42/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3338 @@ -17,7 +17,7 @@ do { \
3339 #define finish_arch_switch(prev) do {} while (0)
3340
3341 typedef void (*vi_handler_t)(void);
3342 -extern unsigned long arch_align_stack(unsigned long sp);
3343 +#define arch_align_stack(x) (x)
3344
3345 #define mb() barrier()
3346 #define rmb() barrier()
3347 diff -urNp linux-2.6.32.42/arch/score/kernel/process.c linux-2.6.32.42/arch/score/kernel/process.c
3348 --- linux-2.6.32.42/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3349 +++ linux-2.6.32.42/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3350 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3351
3352 return task_pt_regs(task)->cp0_epc;
3353 }
3354 -
3355 -unsigned long arch_align_stack(unsigned long sp)
3356 -{
3357 - return sp;
3358 -}
3359 diff -urNp linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c
3360 --- linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3361 +++ linux-2.6.32.42/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3362 @@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3363 return 0;
3364 }
3365
3366 -static struct platform_suspend_ops hp6x0_pm_ops = {
3367 +static const struct platform_suspend_ops hp6x0_pm_ops = {
3368 .enter = hp6x0_pm_enter,
3369 .valid = suspend_valid_only_mem,
3370 };
3371 diff -urNp linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c
3372 --- linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3373 +++ linux-2.6.32.42/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3374 @@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3375 NULL,
3376 };
3377
3378 -static struct sysfs_ops sq_sysfs_ops = {
3379 +static const struct sysfs_ops sq_sysfs_ops = {
3380 .show = sq_sysfs_show,
3381 .store = sq_sysfs_store,
3382 };
3383 diff -urNp linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c
3384 --- linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3385 +++ linux-2.6.32.42/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3386 @@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3387 return 0;
3388 }
3389
3390 -static struct platform_suspend_ops sh_pm_ops = {
3391 +static const struct platform_suspend_ops sh_pm_ops = {
3392 .enter = sh_pm_enter,
3393 .valid = suspend_valid_only_mem,
3394 };
3395 diff -urNp linux-2.6.32.42/arch/sh/kernel/kgdb.c linux-2.6.32.42/arch/sh/kernel/kgdb.c
3396 --- linux-2.6.32.42/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3397 +++ linux-2.6.32.42/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3398 @@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3399 {
3400 }
3401
3402 -struct kgdb_arch arch_kgdb_ops = {
3403 +const struct kgdb_arch arch_kgdb_ops = {
3404 /* Breakpoint instruction: trapa #0x3c */
3405 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3406 .gdb_bpt_instr = { 0x3c, 0xc3 },
3407 diff -urNp linux-2.6.32.42/arch/sh/mm/mmap.c linux-2.6.32.42/arch/sh/mm/mmap.c
3408 --- linux-2.6.32.42/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3409 +++ linux-2.6.32.42/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3410 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3411 addr = PAGE_ALIGN(addr);
3412
3413 vma = find_vma(mm, addr);
3414 - if (TASK_SIZE - len >= addr &&
3415 - (!vma || addr + len <= vma->vm_start))
3416 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3417 return addr;
3418 }
3419
3420 @@ -106,7 +105,7 @@ full_search:
3421 }
3422 return -ENOMEM;
3423 }
3424 - if (likely(!vma || addr + len <= vma->vm_start)) {
3425 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3426 /*
3427 * Remember the place where we stopped the search:
3428 */
3429 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3430 addr = PAGE_ALIGN(addr);
3431
3432 vma = find_vma(mm, addr);
3433 - if (TASK_SIZE - len >= addr &&
3434 - (!vma || addr + len <= vma->vm_start))
3435 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3436 return addr;
3437 }
3438
3439 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3440 /* make sure it can fit in the remaining address space */
3441 if (likely(addr > len)) {
3442 vma = find_vma(mm, addr-len);
3443 - if (!vma || addr <= vma->vm_start) {
3444 + if (check_heap_stack_gap(vma, addr - len, len)) {
3445 /* remember the address as a hint for next time */
3446 return (mm->free_area_cache = addr-len);
3447 }
3448 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3449 if (unlikely(mm->mmap_base < len))
3450 goto bottomup;
3451
3452 - addr = mm->mmap_base-len;
3453 - if (do_colour_align)
3454 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3455 + addr = mm->mmap_base - len;
3456
3457 do {
3458 + if (do_colour_align)
3459 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3460 /*
3461 * Lookup failure means no vma is above this address,
3462 * else if new region fits below vma->vm_start,
3463 * return with success:
3464 */
3465 vma = find_vma(mm, addr);
3466 - if (likely(!vma || addr+len <= vma->vm_start)) {
3467 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3468 /* remember the address as a hint for next time */
3469 return (mm->free_area_cache = addr);
3470 }
3471 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3472 mm->cached_hole_size = vma->vm_start - addr;
3473
3474 /* try just below the current vma->vm_start */
3475 - addr = vma->vm_start-len;
3476 - if (do_colour_align)
3477 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3478 - } while (likely(len < vma->vm_start));
3479 + addr = skip_heap_stack_gap(vma, len);
3480 + } while (!IS_ERR_VALUE(addr));
3481
3482 bottomup:
3483 /*
3484 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h
3485 --- linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3486 +++ linux-2.6.32.42/arch/sparc/include/asm/atomic_64.h 2011-05-04 17:56:20.000000000 -0400
3487 @@ -14,18 +14,40 @@
3488 #define ATOMIC64_INIT(i) { (i) }
3489
3490 #define atomic_read(v) ((v)->counter)
3491 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3492 +{
3493 + return v->counter;
3494 +}
3495 #define atomic64_read(v) ((v)->counter)
3496 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3497 +{
3498 + return v->counter;
3499 +}
3500
3501 #define atomic_set(v, i) (((v)->counter) = i)
3502 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3503 +{
3504 + v->counter = i;
3505 +}
3506 #define atomic64_set(v, i) (((v)->counter) = i)
3507 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3508 +{
3509 + v->counter = i;
3510 +}
3511
3512 extern void atomic_add(int, atomic_t *);
3513 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3514 extern void atomic64_add(long, atomic64_t *);
3515 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3516 extern void atomic_sub(int, atomic_t *);
3517 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3518 extern void atomic64_sub(long, atomic64_t *);
3519 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3520
3521 extern int atomic_add_ret(int, atomic_t *);
3522 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3523 extern long atomic64_add_ret(long, atomic64_t *);
3524 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3525 extern int atomic_sub_ret(int, atomic_t *);
3526 extern long atomic64_sub_ret(long, atomic64_t *);
3527
3528 @@ -33,7 +55,15 @@ extern long atomic64_sub_ret(long, atomi
3529 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3530
3531 #define atomic_inc_return(v) atomic_add_ret(1, v)
3532 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3533 +{
3534 + return atomic_add_ret_unchecked(1, v);
3535 +}
3536 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3537 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3538 +{
3539 + return atomic64_add_ret_unchecked(1, v);
3540 +}
3541
3542 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3543 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3544 @@ -50,6 +80,7 @@ extern long atomic64_sub_ret(long, atomi
3545 * other cases.
3546 */
3547 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3548 +#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
3549 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3550
3551 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3552 @@ -59,30 +90,59 @@ extern long atomic64_sub_ret(long, atomi
3553 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3554
3555 #define atomic_inc(v) atomic_add(1, v)
3556 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3557 +{
3558 + atomic_add_unchecked(1, v);
3559 +}
3560 #define atomic64_inc(v) atomic64_add(1, v)
3561 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3562 +{
3563 + atomic64_add_unchecked(1, v);
3564 +}
3565
3566 #define atomic_dec(v) atomic_sub(1, v)
3567 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3568 +{
3569 + atomic_sub_unchecked(1, v);
3570 +}
3571 #define atomic64_dec(v) atomic64_sub(1, v)
3572 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3573 +{
3574 + atomic64_sub_unchecked(1, v);
3575 +}
3576
3577 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3578 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3579
3580 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3581 +#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3582 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3583 +#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
3584
3585 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3586 {
3587 - int c, old;
3588 + int c, old, new;
3589 c = atomic_read(v);
3590 for (;;) {
3591 - if (unlikely(c == (u)))
3592 + if (unlikely(c == u))
3593 break;
3594 - old = atomic_cmpxchg((v), c, c + (a));
3595 +
3596 + asm volatile("addcc %2, %0, %0\n"
3597 +
3598 +#ifdef CONFIG_PAX_REFCOUNT
3599 + "tvs %%icc, 6\n"
3600 +#endif
3601 +
3602 + : "=r" (new)
3603 + : "0" (c), "ir" (a)
3604 + : "cc");
3605 +
3606 + old = atomic_cmpxchg(v, c, new);
3607 if (likely(old == c))
3608 break;
3609 c = old;
3610 }
3611 - return c != (u);
3612 + return c != u;
3613 }
3614
3615 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3616 @@ -93,17 +153,28 @@ static inline int atomic_add_unless(atom
3617
3618 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3619 {
3620 - long c, old;
3621 + long c, old, new;
3622 c = atomic64_read(v);
3623 for (;;) {
3624 - if (unlikely(c == (u)))
3625 + if (unlikely(c == u))
3626 break;
3627 - old = atomic64_cmpxchg((v), c, c + (a));
3628 +
3629 + asm volatile("addcc %2, %0, %0\n"
3630 +
3631 +#ifdef CONFIG_PAX_REFCOUNT
3632 + "tvs %%xcc, 6\n"
3633 +#endif
3634 +
3635 + : "=r" (new)
3636 + : "0" (c), "ir" (a)
3637 + : "cc");
3638 +
3639 + old = atomic64_cmpxchg(v, c, new);
3640 if (likely(old == c))
3641 break;
3642 c = old;
3643 }
3644 - return c != (u);
3645 + return c != u;
3646 }
3647
3648 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
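/*
 * Illustrative sketch of the CONFIG_PAX_REFCOUNT idea in the hunks above, not
 * from the patch: the checked atomics trap ("tvs %icc, 6") when the signed
 * addition overflows, while the *_unchecked variants keep the old wrapping
 * behaviour for counters that legitimately wrap.  Portable stand-in using the
 * compiler overflow builtin instead of the SPARC trap instruction; atomicity
 * is omitted for brevity:
 */
#include <stdlib.h>

static int refcount_add_checked(int *counter, int a)
{
	int new;

	if (__builtin_add_overflow(*counter, a, &new))
		abort();	/* stands in for the overflow trap */
	*counter = new;
	return new;
}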
3649 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/cache.h linux-2.6.32.42/arch/sparc/include/asm/cache.h
3650 --- linux-2.6.32.42/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3651 +++ linux-2.6.32.42/arch/sparc/include/asm/cache.h 2011-05-17 19:26:34.000000000 -0400
3652 @@ -8,7 +8,7 @@
3653 #define _SPARC_CACHE_H
3654
3655 #define L1_CACHE_SHIFT 5
3656 -#define L1_CACHE_BYTES 32
3657 +#define L1_CACHE_BYTES 32U
3658 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3659
3660 #ifdef CONFIG_SPARC32
3661 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h
3662 --- linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3663 +++ linux-2.6.32.42/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3664 @@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3665 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3666 #define dma_is_consistent(d, h) (1)
3667
3668 -extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3669 +extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3670 extern struct bus_type pci_bus_type;
3671
3672 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3673 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3674 {
3675 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3676 if (dev->bus == &pci_bus_type)
3677 @@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3678 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3679 dma_addr_t *dma_handle, gfp_t flag)
3680 {
3681 - struct dma_map_ops *ops = get_dma_ops(dev);
3682 + const struct dma_map_ops *ops = get_dma_ops(dev);
3683 void *cpu_addr;
3684
3685 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3686 @@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3687 static inline void dma_free_coherent(struct device *dev, size_t size,
3688 void *cpu_addr, dma_addr_t dma_handle)
3689 {
3690 - struct dma_map_ops *ops = get_dma_ops(dev);
3691 + const struct dma_map_ops *ops = get_dma_ops(dev);
3692
3693 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3694 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3695 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/elf_32.h linux-2.6.32.42/arch/sparc/include/asm/elf_32.h
3696 --- linux-2.6.32.42/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3697 +++ linux-2.6.32.42/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3698 @@ -116,6 +116,13 @@ typedef struct {
3699
3700 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3701
3702 +#ifdef CONFIG_PAX_ASLR
3703 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3704 +
3705 +#define PAX_DELTA_MMAP_LEN 16
3706 +#define PAX_DELTA_STACK_LEN 16
3707 +#endif
3708 +
3709 /* This yields a mask that user programs can use to figure out what
3710 instruction set this cpu supports. This can NOT be done in userspace
3711 on Sparc. */
3712 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/elf_64.h linux-2.6.32.42/arch/sparc/include/asm/elf_64.h
3713 --- linux-2.6.32.42/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3714 +++ linux-2.6.32.42/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3715 @@ -163,6 +163,12 @@ typedef struct {
3716 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3717 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3718
3719 +#ifdef CONFIG_PAX_ASLR
3720 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3721 +
3722 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3723 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3724 +#endif
3725
3726 /* This yields a mask that user programs can use to figure out what
3727 instruction set this cpu supports. */
3728 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h
3729 --- linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3730 +++ linux-2.6.32.42/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3731 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3732 BTFIXUPDEF_INT(page_none)
3733 BTFIXUPDEF_INT(page_copy)
3734 BTFIXUPDEF_INT(page_readonly)
3735 +
3736 +#ifdef CONFIG_PAX_PAGEEXEC
3737 +BTFIXUPDEF_INT(page_shared_noexec)
3738 +BTFIXUPDEF_INT(page_copy_noexec)
3739 +BTFIXUPDEF_INT(page_readonly_noexec)
3740 +#endif
3741 +
3742 BTFIXUPDEF_INT(page_kernel)
3743
3744 #define PMD_SHIFT SUN4C_PMD_SHIFT
3745 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3746 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3747 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3748
3749 +#ifdef CONFIG_PAX_PAGEEXEC
3750 +extern pgprot_t PAGE_SHARED_NOEXEC;
3751 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3752 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3753 +#else
3754 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3755 +# define PAGE_COPY_NOEXEC PAGE_COPY
3756 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3757 +#endif
3758 +
3759 extern unsigned long page_kernel;
3760
3761 #ifdef MODULE
3762 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h
3763 --- linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
3764 +++ linux-2.6.32.42/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
3765 @@ -115,6 +115,13 @@
3766 SRMMU_EXEC | SRMMU_REF)
3767 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3768 SRMMU_EXEC | SRMMU_REF)
3769 +
3770 +#ifdef CONFIG_PAX_PAGEEXEC
3771 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3772 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3773 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3774 +#endif
3775 +
3776 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3777 SRMMU_DIRTY | SRMMU_REF)
3778
3779 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h
3780 --- linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
3781 +++ linux-2.6.32.42/arch/sparc/include/asm/spinlock_64.h 2011-05-04 17:56:20.000000000 -0400
3782 @@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
3783
3784 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3785
3786 -static void inline arch_read_lock(raw_rwlock_t *lock)
3787 +static inline void arch_read_lock(raw_rwlock_t *lock)
3788 {
3789 unsigned long tmp1, tmp2;
3790
3791 __asm__ __volatile__ (
3792 "1: ldsw [%2], %0\n"
3793 " brlz,pn %0, 2f\n"
3794 -"4: add %0, 1, %1\n"
3795 +"4: addcc %0, 1, %1\n"
3796 +
3797 +#ifdef CONFIG_PAX_REFCOUNT
3798 +" tvs %%icc, 6\n"
3799 +#endif
3800 +
3801 " cas [%2], %0, %1\n"
3802 " cmp %0, %1\n"
3803 " bne,pn %%icc, 1b\n"
3804 @@ -112,7 +117,7 @@ static void inline arch_read_lock(raw_rw
3805 " .previous"
3806 : "=&r" (tmp1), "=&r" (tmp2)
3807 : "r" (lock)
3808 - : "memory");
3809 + : "memory", "cc");
3810 }
3811
3812 static int inline arch_read_trylock(raw_rwlock_t *lock)
3813 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
3814 "1: ldsw [%2], %0\n"
3815 " brlz,a,pn %0, 2f\n"
3816 " mov 0, %0\n"
3817 -" add %0, 1, %1\n"
3818 +" addcc %0, 1, %1\n"
3819 +
3820 +#ifdef CONFIG_PAX_REFCOUNT
3821 +" tvs %%icc, 6\n"
3822 +#endif
3823 +
3824 " cas [%2], %0, %1\n"
3825 " cmp %0, %1\n"
3826 " bne,pn %%icc, 1b\n"
3827 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
3828 return tmp1;
3829 }
3830
3831 -static void inline arch_read_unlock(raw_rwlock_t *lock)
3832 +static inline void arch_read_unlock(raw_rwlock_t *lock)
3833 {
3834 unsigned long tmp1, tmp2;
3835
3836 __asm__ __volatile__(
3837 "1: lduw [%2], %0\n"
3838 -" sub %0, 1, %1\n"
3839 +" subcc %0, 1, %1\n"
3840 +
3841 +#ifdef CONFIG_PAX_REFCOUNT
3842 +" tvs %%icc, 6\n"
3843 +#endif
3844 +
3845 " cas [%2], %0, %1\n"
3846 " cmp %0, %1\n"
3847 " bne,pn %%xcc, 1b\n"
3848 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
3849 : "memory");
3850 }
3851
3852 -static void inline arch_write_lock(raw_rwlock_t *lock)
3853 +static inline void arch_write_lock(raw_rwlock_t *lock)
3854 {
3855 unsigned long mask, tmp1, tmp2;
3856
3857 @@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
3858 : "memory");
3859 }
3860
3861 -static void inline arch_write_unlock(raw_rwlock_t *lock)
3862 +static inline void arch_write_unlock(raw_rwlock_t *lock)
3863 {
3864 __asm__ __volatile__(
3865 " stw %%g0, [%0]"
3866 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h
3867 --- linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
3868 +++ linux-2.6.32.42/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
3869 @@ -50,6 +50,8 @@ struct thread_info {
3870 unsigned long w_saved;
3871
3872 struct restart_block restart_block;
3873 +
3874 + unsigned long lowest_stack;
3875 };
3876
3877 /*
3878 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h
3879 --- linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
3880 +++ linux-2.6.32.42/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
3881 @@ -68,6 +68,8 @@ struct thread_info {
3882 struct pt_regs *kern_una_regs;
3883 unsigned int kern_una_insn;
3884
3885 + unsigned long lowest_stack;
3886 +
3887 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3888 };
3889
3890 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h
3891 --- linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
3892 +++ linux-2.6.32.42/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
3893 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3894
3895 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3896 {
3897 - if (n && __access_ok((unsigned long) to, n))
3898 + if ((long)n < 0)
3899 + return n;
3900 +
3901 + if (n && __access_ok((unsigned long) to, n)) {
3902 + if (!__builtin_constant_p(n))
3903 + check_object_size(from, n, true);
3904 return __copy_user(to, (__force void __user *) from, n);
3905 - else
3906 + } else
3907 return n;
3908 }
3909
3910 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3911 {
3912 + if ((long)n < 0)
3913 + return n;
3914 +
3915 + if (!__builtin_constant_p(n))
3916 + check_object_size(from, n, true);
3917 +
3918 return __copy_user(to, (__force void __user *) from, n);
3919 }
3920
3921 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3922 {
3923 - if (n && __access_ok((unsigned long) from, n))
3924 + if ((long)n < 0)
3925 + return n;
3926 +
3927 + if (n && __access_ok((unsigned long) from, n)) {
3928 + if (!__builtin_constant_p(n))
3929 + check_object_size(to, n, false);
3930 return __copy_user((__force void __user *) to, from, n);
3931 - else
3932 + } else
3933 return n;
3934 }
3935
3936 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3937 {
3938 + if ((long)n < 0)
3939 + return n;
3940 +
3941 return __copy_user((__force void __user *) to, from, n);
3942 }
3943
3944 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h
3945 --- linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
3946 +++ linux-2.6.32.42/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
3947 @@ -9,6 +9,7 @@
3948 #include <linux/compiler.h>
3949 #include <linux/string.h>
3950 #include <linux/thread_info.h>
3951 +#include <linux/kernel.h>
3952 #include <asm/asi.h>
3953 #include <asm/system.h>
3954 #include <asm/spitfire.h>
3955 @@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
3956 static inline unsigned long __must_check
3957 copy_from_user(void *to, const void __user *from, unsigned long size)
3958 {
3959 - unsigned long ret = ___copy_from_user(to, from, size);
3960 + unsigned long ret;
3961
3962 + if ((long)size < 0 || size > INT_MAX)
3963 + return size;
3964 +
3965 + if (!__builtin_constant_p(size))
3966 + check_object_size(to, size, false);
3967 +
3968 + ret = ___copy_from_user(to, from, size);
3969 if (unlikely(ret))
3970 ret = copy_from_user_fixup(to, from, size);
3971 return ret;
3972 @@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
3973 static inline unsigned long __must_check
3974 copy_to_user(void __user *to, const void *from, unsigned long size)
3975 {
3976 - unsigned long ret = ___copy_to_user(to, from, size);
3977 + unsigned long ret;
3978 +
3979 + if ((long)size < 0 || size > INT_MAX)
3980 + return size;
3981 +
3982 + if (!__builtin_constant_p(size))
3983 + check_object_size(from, size, true);
3984
3985 + ret = ___copy_to_user(to, from, size);
3986 if (unlikely(ret))
3987 ret = copy_to_user_fixup(to, from, size);
3988 return ret;
3989 diff -urNp linux-2.6.32.42/arch/sparc/include/asm/uaccess.h linux-2.6.32.42/arch/sparc/include/asm/uaccess.h
3990 --- linux-2.6.32.42/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3991 +++ linux-2.6.32.42/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
3992 @@ -1,5 +1,13 @@
3993 #ifndef ___ASM_SPARC_UACCESS_H
3994 #define ___ASM_SPARC_UACCESS_H
3995 +
3996 +#ifdef __KERNEL__
3997 +#ifndef __ASSEMBLY__
3998 +#include <linux/types.h>
3999 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
4000 +#endif
4001 +#endif
4002 +
4003 #if defined(__sparc__) && defined(__arch64__)
4004 #include <asm/uaccess_64.h>
4005 #else
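/*
 * Illustrative sketch, not from the patch, of the call-site pattern added in
 * the sparc uaccess hunks above: when the copy length is not a compile-time
 * constant, the runtime helper check_object_size() (declared in the uaccess.h
 * hunk above) verifies the kernel buffer really spans n bytes before the copy
 * proceeds.  raw_copy() is a hypothetical stand-in for the low-level copy
 * routine; in the real headers the wrapper is a static inline so that
 * __builtin_constant_p() can see literal sizes.
 */
#include <stdbool.h>

extern void check_object_size(const void *ptr, unsigned long n, bool to);
extern unsigned long raw_copy(void *to, const void *from, unsigned long n);

static unsigned long copy_out(void *user_to, const void *kernel_from, unsigned long n)
{
	if ((long)n < 0)			/* reject underflowed lengths early */
		return n;
	if (!__builtin_constant_p(n))		/* constant sizes are validated at build time */
		check_object_size(kernel_from, n, true);
	return raw_copy(user_to, kernel_from, n);
}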
4006 diff -urNp linux-2.6.32.42/arch/sparc/kernel/iommu.c linux-2.6.32.42/arch/sparc/kernel/iommu.c
4007 --- linux-2.6.32.42/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4008 +++ linux-2.6.32.42/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4009 @@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4010 spin_unlock_irqrestore(&iommu->lock, flags);
4011 }
4012
4013 -static struct dma_map_ops sun4u_dma_ops = {
4014 +static const struct dma_map_ops sun4u_dma_ops = {
4015 .alloc_coherent = dma_4u_alloc_coherent,
4016 .free_coherent = dma_4u_free_coherent,
4017 .map_page = dma_4u_map_page,
4018 @@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4019 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4020 };
4021
4022 -struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4023 +const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4024 EXPORT_SYMBOL(dma_ops);
4025
4026 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4027 diff -urNp linux-2.6.32.42/arch/sparc/kernel/ioport.c linux-2.6.32.42/arch/sparc/kernel/ioport.c
4028 --- linux-2.6.32.42/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4029 +++ linux-2.6.32.42/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4030 @@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4031 BUG();
4032 }
4033
4034 -struct dma_map_ops sbus_dma_ops = {
4035 +const struct dma_map_ops sbus_dma_ops = {
4036 .alloc_coherent = sbus_alloc_coherent,
4037 .free_coherent = sbus_free_coherent,
4038 .map_page = sbus_map_page,
4039 @@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4040 .sync_sg_for_device = sbus_sync_sg_for_device,
4041 };
4042
4043 -struct dma_map_ops *dma_ops = &sbus_dma_ops;
4044 +const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4045 EXPORT_SYMBOL(dma_ops);
4046
4047 static int __init sparc_register_ioport(void)
4048 @@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4049 }
4050 }
4051
4052 -struct dma_map_ops pci32_dma_ops = {
4053 +const struct dma_map_ops pci32_dma_ops = {
4054 .alloc_coherent = pci32_alloc_coherent,
4055 .free_coherent = pci32_free_coherent,
4056 .map_page = pci32_map_page,
4057 diff -urNp linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c
4058 --- linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4059 +++ linux-2.6.32.42/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4060 @@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4061 {
4062 }
4063
4064 -struct kgdb_arch arch_kgdb_ops = {
4065 +const struct kgdb_arch arch_kgdb_ops = {
4066 /* Breakpoint instruction: ta 0x7d */
4067 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4068 };
4069 diff -urNp linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c
4070 --- linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4071 +++ linux-2.6.32.42/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4072 @@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4073 {
4074 }
4075
4076 -struct kgdb_arch arch_kgdb_ops = {
4077 +const struct kgdb_arch arch_kgdb_ops = {
4078 /* Breakpoint instruction: ta 0x72 */
4079 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4080 };
4081 diff -urNp linux-2.6.32.42/arch/sparc/kernel/Makefile linux-2.6.32.42/arch/sparc/kernel/Makefile
4082 --- linux-2.6.32.42/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4083 +++ linux-2.6.32.42/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4084 @@ -3,7 +3,7 @@
4085 #
4086
4087 asflags-y := -ansi
4088 -ccflags-y := -Werror
4089 +#ccflags-y := -Werror
4090
4091 extra-y := head_$(BITS).o
4092 extra-y += init_task.o
4093 diff -urNp linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c
4094 --- linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4095 +++ linux-2.6.32.42/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4096 @@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4097 spin_unlock_irqrestore(&iommu->lock, flags);
4098 }
4099
4100 -static struct dma_map_ops sun4v_dma_ops = {
4101 +static const struct dma_map_ops sun4v_dma_ops = {
4102 .alloc_coherent = dma_4v_alloc_coherent,
4103 .free_coherent = dma_4v_free_coherent,
4104 .map_page = dma_4v_map_page,
4105 diff -urNp linux-2.6.32.42/arch/sparc/kernel/process_32.c linux-2.6.32.42/arch/sparc/kernel/process_32.c
4106 --- linux-2.6.32.42/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4107 +++ linux-2.6.32.42/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4108 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4109 rw->ins[4], rw->ins[5],
4110 rw->ins[6],
4111 rw->ins[7]);
4112 - printk("%pS\n", (void *) rw->ins[7]);
4113 + printk("%pA\n", (void *) rw->ins[7]);
4114 rw = (struct reg_window32 *) rw->ins[6];
4115 }
4116 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4117 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4118
4119 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4120 r->psr, r->pc, r->npc, r->y, print_tainted());
4121 - printk("PC: <%pS>\n", (void *) r->pc);
4122 + printk("PC: <%pA>\n", (void *) r->pc);
4123 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4124 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4125 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4126 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4127 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4128 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4129 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4130 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4131
4132 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4133 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4134 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4135 rw = (struct reg_window32 *) fp;
4136 pc = rw->ins[7];
4137 printk("[%08lx : ", pc);
4138 - printk("%pS ] ", (void *) pc);
4139 + printk("%pA ] ", (void *) pc);
4140 fp = rw->ins[6];
4141 } while (++count < 16);
4142 printk("\n");
4143 diff -urNp linux-2.6.32.42/arch/sparc/kernel/process_64.c linux-2.6.32.42/arch/sparc/kernel/process_64.c
4144 --- linux-2.6.32.42/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4145 +++ linux-2.6.32.42/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4146 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4147 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4148 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4149 if (regs->tstate & TSTATE_PRIV)
4150 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4151 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4152 }
4153
4154 void show_regs(struct pt_regs *regs)
4155 {
4156 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4157 regs->tpc, regs->tnpc, regs->y, print_tainted());
4158 - printk("TPC: <%pS>\n", (void *) regs->tpc);
4159 + printk("TPC: <%pA>\n", (void *) regs->tpc);
4160 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4161 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4162 regs->u_regs[3]);
4163 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4164 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4165 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4166 regs->u_regs[15]);
4167 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4168 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4169 show_regwindow(regs);
4170 }
4171
4172 @@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4173 ((tp && tp->task) ? tp->task->pid : -1));
4174
4175 if (gp->tstate & TSTATE_PRIV) {
4176 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4177 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4178 (void *) gp->tpc,
4179 (void *) gp->o7,
4180 (void *) gp->i7,
4181 diff -urNp linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c
4182 --- linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4183 +++ linux-2.6.32.42/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4184 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4185 if (ARCH_SUN4C && len > 0x20000000)
4186 return -ENOMEM;
4187 if (!addr)
4188 - addr = TASK_UNMAPPED_BASE;
4189 + addr = current->mm->mmap_base;
4190
4191 if (flags & MAP_SHARED)
4192 addr = COLOUR_ALIGN(addr);
4193 @@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4194 }
4195 if (TASK_SIZE - PAGE_SIZE - len < addr)
4196 return -ENOMEM;
4197 - if (!vmm || addr + len <= vmm->vm_start)
4198 + if (check_heap_stack_gap(vmm, addr, len))
4199 return addr;
4200 addr = vmm->vm_end;
4201 if (flags & MAP_SHARED)
4202 diff -urNp linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c
4203 --- linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4204 +++ linux-2.6.32.42/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4205 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4206 /* We do not accept a shared mapping if it would violate
4207 * cache aliasing constraints.
4208 */
4209 - if ((flags & MAP_SHARED) &&
4210 + if ((filp || (flags & MAP_SHARED)) &&
4211 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4212 return -EINVAL;
4213 return addr;
4214 @@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4215 if (filp || (flags & MAP_SHARED))
4216 do_color_align = 1;
4217
4218 +#ifdef CONFIG_PAX_RANDMMAP
4219 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4220 +#endif
4221 +
4222 if (addr) {
4223 if (do_color_align)
4224 addr = COLOUR_ALIGN(addr, pgoff);
4225 @@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4226 addr = PAGE_ALIGN(addr);
4227
4228 vma = find_vma(mm, addr);
4229 - if (task_size - len >= addr &&
4230 - (!vma || addr + len <= vma->vm_start))
4231 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4232 return addr;
4233 }
4234
4235 if (len > mm->cached_hole_size) {
4236 - start_addr = addr = mm->free_area_cache;
4237 + start_addr = addr = mm->free_area_cache;
4238 } else {
4239 - start_addr = addr = TASK_UNMAPPED_BASE;
4240 + start_addr = addr = mm->mmap_base;
4241 mm->cached_hole_size = 0;
4242 }
4243
4244 @@ -175,14 +178,14 @@ full_search:
4245 vma = find_vma(mm, VA_EXCLUDE_END);
4246 }
4247 if (unlikely(task_size < addr)) {
4248 - if (start_addr != TASK_UNMAPPED_BASE) {
4249 - start_addr = addr = TASK_UNMAPPED_BASE;
4250 + if (start_addr != mm->mmap_base) {
4251 + start_addr = addr = mm->mmap_base;
4252 mm->cached_hole_size = 0;
4253 goto full_search;
4254 }
4255 return -ENOMEM;
4256 }
4257 - if (likely(!vma || addr + len <= vma->vm_start)) {
4258 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4259 /*
4260 * Remember the place where we stopped the search:
4261 */
4262 @@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4263 /* We do not accept a shared mapping if it would violate
4264 * cache aliasing constraints.
4265 */
4266 - if ((flags & MAP_SHARED) &&
4267 + if ((filp || (flags & MAP_SHARED)) &&
4268 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4269 return -EINVAL;
4270 return addr;
4271 @@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4272 addr = PAGE_ALIGN(addr);
4273
4274 vma = find_vma(mm, addr);
4275 - if (task_size - len >= addr &&
4276 - (!vma || addr + len <= vma->vm_start))
4277 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4278 return addr;
4279 }
4280
4281 @@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4282 /* make sure it can fit in the remaining address space */
4283 if (likely(addr > len)) {
4284 vma = find_vma(mm, addr-len);
4285 - if (!vma || addr <= vma->vm_start) {
4286 + if (check_heap_stack_gap(vma, addr - len, len)) {
4287 /* remember the address as a hint for next time */
4288 return (mm->free_area_cache = addr-len);
4289 }
4290 @@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4291 if (unlikely(mm->mmap_base < len))
4292 goto bottomup;
4293
4294 - addr = mm->mmap_base-len;
4295 - if (do_color_align)
4296 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4297 + addr = mm->mmap_base - len;
4298
4299 do {
4300 + if (do_color_align)
4301 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4302 /*
4303 * Lookup failure means no vma is above this address,
4304 * else if new region fits below vma->vm_start,
4305 * return with success:
4306 */
4307 vma = find_vma(mm, addr);
4308 - if (likely(!vma || addr+len <= vma->vm_start)) {
4309 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4310 /* remember the address as a hint for next time */
4311 return (mm->free_area_cache = addr);
4312 }
4313 @@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4314 mm->cached_hole_size = vma->vm_start - addr;
4315
4316 /* try just below the current vma->vm_start */
4317 - addr = vma->vm_start-len;
4318 - if (do_color_align)
4319 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4320 - } while (likely(len < vma->vm_start));
4321 + addr = skip_heap_stack_gap(vma, len);
4322 + } while (!IS_ERR_VALUE(addr));
4323
4324 bottomup:
4325 /*
4326 @@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4327 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4328 sysctl_legacy_va_layout) {
4329 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4330 +
4331 +#ifdef CONFIG_PAX_RANDMMAP
4332 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4333 + mm->mmap_base += mm->delta_mmap;
4334 +#endif
4335 +
4336 mm->get_unmapped_area = arch_get_unmapped_area;
4337 mm->unmap_area = arch_unmap_area;
4338 } else {
4339 @@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4340 gap = (task_size / 6 * 5);
4341
4342 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4343 +
4344 +#ifdef CONFIG_PAX_RANDMMAP
4345 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4346 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4347 +#endif
4348 +
4349 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4350 mm->unmap_area = arch_unmap_area_topdown;
4351 }
4352 diff -urNp linux-2.6.32.42/arch/sparc/kernel/traps_32.c linux-2.6.32.42/arch/sparc/kernel/traps_32.c
4353 --- linux-2.6.32.42/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4354 +++ linux-2.6.32.42/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4355 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4356 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4357 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4358
4359 +extern void gr_handle_kernel_exploit(void);
4360 +
4361 void die_if_kernel(char *str, struct pt_regs *regs)
4362 {
4363 static int die_counter;
4364 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4365 count++ < 30 &&
4366 (((unsigned long) rw) >= PAGE_OFFSET) &&
4367 !(((unsigned long) rw) & 0x7)) {
4368 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
4369 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
4370 (void *) rw->ins[7]);
4371 rw = (struct reg_window32 *)rw->ins[6];
4372 }
4373 }
4374 printk("Instruction DUMP:");
4375 instruction_dump ((unsigned long *) regs->pc);
4376 - if(regs->psr & PSR_PS)
4377 + if(regs->psr & PSR_PS) {
4378 + gr_handle_kernel_exploit();
4379 do_exit(SIGKILL);
4380 + }
4381 do_exit(SIGSEGV);
4382 }
4383
4384 diff -urNp linux-2.6.32.42/arch/sparc/kernel/traps_64.c linux-2.6.32.42/arch/sparc/kernel/traps_64.c
4385 --- linux-2.6.32.42/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4386 +++ linux-2.6.32.42/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4387 @@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4388 i + 1,
4389 p->trapstack[i].tstate, p->trapstack[i].tpc,
4390 p->trapstack[i].tnpc, p->trapstack[i].tt);
4391 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4392 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4393 }
4394 }
4395
4396 @@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4397
4398 lvl -= 0x100;
4399 if (regs->tstate & TSTATE_PRIV) {
4400 +
4401 +#ifdef CONFIG_PAX_REFCOUNT
4402 + if (lvl == 6)
4403 + pax_report_refcount_overflow(regs);
4404 +#endif
4405 +
4406 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4407 die_if_kernel(buffer, regs);
4408 }
4409 @@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4410 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4411 {
4412 char buffer[32];
4413 -
4414 +
4415 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4416 0, lvl, SIGTRAP) == NOTIFY_STOP)
4417 return;
4418
4419 +#ifdef CONFIG_PAX_REFCOUNT
4420 + if (lvl == 6)
4421 + pax_report_refcount_overflow(regs);
4422 +#endif
4423 +
4424 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4425
4426 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4427 @@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4428 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4429 printk("%s" "ERROR(%d): ",
4430 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4431 - printk("TPC<%pS>\n", (void *) regs->tpc);
4432 + printk("TPC<%pA>\n", (void *) regs->tpc);
4433 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4434 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4435 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4436 @@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4437 smp_processor_id(),
4438 (type & 0x1) ? 'I' : 'D',
4439 regs->tpc);
4440 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4441 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4442 panic("Irrecoverable Cheetah+ parity error.");
4443 }
4444
4445 @@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4446 smp_processor_id(),
4447 (type & 0x1) ? 'I' : 'D',
4448 regs->tpc);
4449 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4450 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4451 }
4452
4453 struct sun4v_error_entry {
4454 @@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4455
4456 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4457 regs->tpc, tl);
4458 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4459 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4460 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4461 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4462 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4463 (void *) regs->u_regs[UREG_I7]);
4464 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4465 "pte[%lx] error[%lx]\n",
4466 @@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4467
4468 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4469 regs->tpc, tl);
4470 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4471 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4472 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4473 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4474 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4475 (void *) regs->u_regs[UREG_I7]);
4476 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4477 "pte[%lx] error[%lx]\n",
4478 @@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4479 fp = (unsigned long)sf->fp + STACK_BIAS;
4480 }
4481
4482 - printk(" [%016lx] %pS\n", pc, (void *) pc);
4483 + printk(" [%016lx] %pA\n", pc, (void *) pc);
4484 } while (++count < 16);
4485 }
4486
4487 @@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4488 return (struct reg_window *) (fp + STACK_BIAS);
4489 }
4490
4491 +extern void gr_handle_kernel_exploit(void);
4492 +
4493 void die_if_kernel(char *str, struct pt_regs *regs)
4494 {
4495 static int die_counter;
4496 @@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4497 while (rw &&
4498 count++ < 30&&
4499 is_kernel_stack(current, rw)) {
4500 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
4501 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
4502 (void *) rw->ins[7]);
4503
4504 rw = kernel_stack_up(rw);
4505 @@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4506 }
4507 user_instruction_dump ((unsigned int __user *) regs->tpc);
4508 }
4509 - if (regs->tstate & TSTATE_PRIV)
4510 + if (regs->tstate & TSTATE_PRIV) {
4511 + gr_handle_kernel_exploit();
4512 do_exit(SIGKILL);
4513 + }
4514 +
4515 do_exit(SIGSEGV);
4516 }
4517 EXPORT_SYMBOL(die_if_kernel);
4518 diff -urNp linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c
4519 --- linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4520 +++ linux-2.6.32.42/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4521 @@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4522 if (count < 5) {
4523 last_time = jiffies;
4524 count++;
4525 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
4526 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
4527 regs->tpc, (void *) regs->tpc);
4528 }
4529 }
4530 diff -urNp linux-2.6.32.42/arch/sparc/lib/atomic_64.S linux-2.6.32.42/arch/sparc/lib/atomic_64.S
4531 --- linux-2.6.32.42/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4532 +++ linux-2.6.32.42/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4533 @@ -18,7 +18,12 @@
4534 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4535 BACKOFF_SETUP(%o2)
4536 1: lduw [%o1], %g1
4537 - add %g1, %o0, %g7
4538 + addcc %g1, %o0, %g7
4539 +
4540 +#ifdef CONFIG_PAX_REFCOUNT
4541 + tvs %icc, 6
4542 +#endif
4543 +
4544 cas [%o1], %g1, %g7
4545 cmp %g1, %g7
4546 bne,pn %icc, 2f
4547 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4548 2: BACKOFF_SPIN(%o2, %o3, 1b)
4549 .size atomic_add, .-atomic_add
4550
4551 + .globl atomic_add_unchecked
4552 + .type atomic_add_unchecked,#function
4553 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4554 + BACKOFF_SETUP(%o2)
4555 +1: lduw [%o1], %g1
4556 + add %g1, %o0, %g7
4557 + cas [%o1], %g1, %g7
4558 + cmp %g1, %g7
4559 + bne,pn %icc, 2f
4560 + nop
4561 + retl
4562 + nop
4563 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4564 + .size atomic_add_unchecked, .-atomic_add_unchecked
4565 +
4566 .globl atomic_sub
4567 .type atomic_sub,#function
4568 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4569 BACKOFF_SETUP(%o2)
4570 1: lduw [%o1], %g1
4571 - sub %g1, %o0, %g7
4572 + subcc %g1, %o0, %g7
4573 +
4574 +#ifdef CONFIG_PAX_REFCOUNT
4575 + tvs %icc, 6
4576 +#endif
4577 +
4578 cas [%o1], %g1, %g7
4579 cmp %g1, %g7
4580 bne,pn %icc, 2f
4581 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4582 2: BACKOFF_SPIN(%o2, %o3, 1b)
4583 .size atomic_sub, .-atomic_sub
4584
4585 + .globl atomic_sub_unchecked
4586 + .type atomic_sub_unchecked,#function
4587 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4588 + BACKOFF_SETUP(%o2)
4589 +1: lduw [%o1], %g1
4590 + sub %g1, %o0, %g7
4591 + cas [%o1], %g1, %g7
4592 + cmp %g1, %g7
4593 + bne,pn %icc, 2f
4594 + nop
4595 + retl
4596 + nop
4597 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4598 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
4599 +
4600 .globl atomic_add_ret
4601 .type atomic_add_ret,#function
4602 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4603 BACKOFF_SETUP(%o2)
4604 1: lduw [%o1], %g1
4605 - add %g1, %o0, %g7
4606 + addcc %g1, %o0, %g7
4607 +
4608 +#ifdef CONFIG_PAX_REFCOUNT
4609 + tvs %icc, 6
4610 +#endif
4611 +
4612 cas [%o1], %g1, %g7
4613 cmp %g1, %g7
4614 bne,pn %icc, 2f
4615 @@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4616 2: BACKOFF_SPIN(%o2, %o3, 1b)
4617 .size atomic_add_ret, .-atomic_add_ret
4618
4619 + .globl atomic_add_ret_unchecked
4620 + .type atomic_add_ret_unchecked,#function
4621 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4622 + BACKOFF_SETUP(%o2)
4623 +1: lduw [%o1], %g1
4624 + addcc %g1, %o0, %g7
4625 + cas [%o1], %g1, %g7
4626 + cmp %g1, %g7
4627 + bne,pn %icc, 2f
4628 + add %g7, %o0, %g7
4629 + sra %g7, 0, %o0
4630 + retl
4631 + nop
4632 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4633 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4634 +
4635 .globl atomic_sub_ret
4636 .type atomic_sub_ret,#function
4637 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4638 BACKOFF_SETUP(%o2)
4639 1: lduw [%o1], %g1
4640 - sub %g1, %o0, %g7
4641 + subcc %g1, %o0, %g7
4642 +
4643 +#ifdef CONFIG_PAX_REFCOUNT
4644 + tvs %icc, 6
4645 +#endif
4646 +
4647 cas [%o1], %g1, %g7
4648 cmp %g1, %g7
4649 bne,pn %icc, 2f
4650 @@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4651 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4652 BACKOFF_SETUP(%o2)
4653 1: ldx [%o1], %g1
4654 - add %g1, %o0, %g7
4655 + addcc %g1, %o0, %g7
4656 +
4657 +#ifdef CONFIG_PAX_REFCOUNT
4658 + tvs %xcc, 6
4659 +#endif
4660 +
4661 casx [%o1], %g1, %g7
4662 cmp %g1, %g7
4663 bne,pn %xcc, 2f
4664 @@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4665 2: BACKOFF_SPIN(%o2, %o3, 1b)
4666 .size atomic64_add, .-atomic64_add
4667
4668 + .globl atomic64_add_unchecked
4669 + .type atomic64_add_unchecked,#function
4670 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4671 + BACKOFF_SETUP(%o2)
4672 +1: ldx [%o1], %g1
4673 + addcc %g1, %o0, %g7
4674 + casx [%o1], %g1, %g7
4675 + cmp %g1, %g7
4676 + bne,pn %xcc, 2f
4677 + nop
4678 + retl
4679 + nop
4680 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4681 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4682 +
4683 .globl atomic64_sub
4684 .type atomic64_sub,#function
4685 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4686 BACKOFF_SETUP(%o2)
4687 1: ldx [%o1], %g1
4688 - sub %g1, %o0, %g7
4689 + subcc %g1, %o0, %g7
4690 +
4691 +#ifdef CONFIG_PAX_REFCOUNT
4692 + tvs %xcc, 6
4693 +#endif
4694 +
4695 casx [%o1], %g1, %g7
4696 cmp %g1, %g7
4697 bne,pn %xcc, 2f
4698 @@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4699 2: BACKOFF_SPIN(%o2, %o3, 1b)
4700 .size atomic64_sub, .-atomic64_sub
4701
4702 + .globl atomic64_sub_unchecked
4703 + .type atomic64_sub_unchecked,#function
4704 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4705 + BACKOFF_SETUP(%o2)
4706 +1: ldx [%o1], %g1
4707 + subcc %g1, %o0, %g7
4708 + casx [%o1], %g1, %g7
4709 + cmp %g1, %g7
4710 + bne,pn %xcc, 2f
4711 + nop
4712 + retl
4713 + nop
4714 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4715 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4716 +
4717 .globl atomic64_add_ret
4718 .type atomic64_add_ret,#function
4719 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4720 BACKOFF_SETUP(%o2)
4721 1: ldx [%o1], %g1
4722 - add %g1, %o0, %g7
4723 + addcc %g1, %o0, %g7
4724 +
4725 +#ifdef CONFIG_PAX_REFCOUNT
4726 + tvs %xcc, 6
4727 +#endif
4728 +
4729 casx [%o1], %g1, %g7
4730 cmp %g1, %g7
4731 bne,pn %xcc, 2f
4732 @@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4733 2: BACKOFF_SPIN(%o2, %o3, 1b)
4734 .size atomic64_add_ret, .-atomic64_add_ret
4735
4736 + .globl atomic64_add_ret_unchecked
4737 + .type atomic64_add_ret_unchecked,#function
4738 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4739 + BACKOFF_SETUP(%o2)
4740 +1: ldx [%o1], %g1
4741 + addcc %g1, %o0, %g7
4742 + casx [%o1], %g1, %g7
4743 + cmp %g1, %g7
4744 + bne,pn %xcc, 2f
4745 + add %g7, %o0, %g7
4746 + mov %g7, %o0
4747 + retl
4748 + nop
4749 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4750 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4751 +
4752 .globl atomic64_sub_ret
4753 .type atomic64_sub_ret,#function
4754 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4755 BACKOFF_SETUP(%o2)
4756 1: ldx [%o1], %g1
4757 - sub %g1, %o0, %g7
4758 + subcc %g1, %o0, %g7
4759 +
4760 +#ifdef CONFIG_PAX_REFCOUNT
4761 + tvs %xcc, 6
4762 +#endif
4763 +
4764 casx [%o1], %g1, %g7
4765 cmp %g1, %g7
4766 bne,pn %xcc, 2f
4767 diff -urNp linux-2.6.32.42/arch/sparc/lib/ksyms.c linux-2.6.32.42/arch/sparc/lib/ksyms.c
4768 --- linux-2.6.32.42/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
4769 +++ linux-2.6.32.42/arch/sparc/lib/ksyms.c 2011-04-17 15:56:46.000000000 -0400
4770 @@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
4771
4772 /* Atomic counter implementation. */
4773 EXPORT_SYMBOL(atomic_add);
4774 +EXPORT_SYMBOL(atomic_add_unchecked);
4775 EXPORT_SYMBOL(atomic_add_ret);
4776 EXPORT_SYMBOL(atomic_sub);
4777 +EXPORT_SYMBOL(atomic_sub_unchecked);
4778 EXPORT_SYMBOL(atomic_sub_ret);
4779 EXPORT_SYMBOL(atomic64_add);
4780 +EXPORT_SYMBOL(atomic64_add_unchecked);
4781 EXPORT_SYMBOL(atomic64_add_ret);
4782 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4783 EXPORT_SYMBOL(atomic64_sub);
4784 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4785 EXPORT_SYMBOL(atomic64_sub_ret);
4786
4787 /* Atomic bit operations. */
4788 diff -urNp linux-2.6.32.42/arch/sparc/lib/Makefile linux-2.6.32.42/arch/sparc/lib/Makefile
4789 --- linux-2.6.32.42/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
4790 +++ linux-2.6.32.42/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
4791 @@ -2,7 +2,7 @@
4792 #
4793
4794 asflags-y := -ansi -DST_DIV0=0x02
4795 -ccflags-y := -Werror
4796 +#ccflags-y := -Werror
4797
4798 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4799 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4800 diff -urNp linux-2.6.32.42/arch/sparc/lib/rwsem_64.S linux-2.6.32.42/arch/sparc/lib/rwsem_64.S
4801 --- linux-2.6.32.42/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
4802 +++ linux-2.6.32.42/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
4803 @@ -11,7 +11,12 @@
4804 .globl __down_read
4805 __down_read:
4806 1: lduw [%o0], %g1
4807 - add %g1, 1, %g7
4808 + addcc %g1, 1, %g7
4809 +
4810 +#ifdef CONFIG_PAX_REFCOUNT
4811 + tvs %icc, 6
4812 +#endif
4813 +
4814 cas [%o0], %g1, %g7
4815 cmp %g1, %g7
4816 bne,pn %icc, 1b
4817 @@ -33,7 +38,12 @@ __down_read:
4818 .globl __down_read_trylock
4819 __down_read_trylock:
4820 1: lduw [%o0], %g1
4821 - add %g1, 1, %g7
4822 + addcc %g1, 1, %g7
4823 +
4824 +#ifdef CONFIG_PAX_REFCOUNT
4825 + tvs %icc, 6
4826 +#endif
4827 +
4828 cmp %g7, 0
4829 bl,pn %icc, 2f
4830 mov 0, %o1
4831 @@ -51,7 +61,12 @@ __down_write:
4832 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4833 1:
4834 lduw [%o0], %g3
4835 - add %g3, %g1, %g7
4836 + addcc %g3, %g1, %g7
4837 +
4838 +#ifdef CONFIG_PAX_REFCOUNT
4839 + tvs %icc, 6
4840 +#endif
4841 +
4842 cas [%o0], %g3, %g7
4843 cmp %g3, %g7
4844 bne,pn %icc, 1b
4845 @@ -77,7 +92,12 @@ __down_write_trylock:
4846 cmp %g3, 0
4847 bne,pn %icc, 2f
4848 mov 0, %o1
4849 - add %g3, %g1, %g7
4850 + addcc %g3, %g1, %g7
4851 +
4852 +#ifdef CONFIG_PAX_REFCOUNT
4853 + tvs %icc, 6
4854 +#endif
4855 +
4856 cas [%o0], %g3, %g7
4857 cmp %g3, %g7
4858 bne,pn %icc, 1b
4859 @@ -90,7 +110,12 @@ __down_write_trylock:
4860 __up_read:
4861 1:
4862 lduw [%o0], %g1
4863 - sub %g1, 1, %g7
4864 + subcc %g1, 1, %g7
4865 +
4866 +#ifdef CONFIG_PAX_REFCOUNT
4867 + tvs %icc, 6
4868 +#endif
4869 +
4870 cas [%o0], %g1, %g7
4871 cmp %g1, %g7
4872 bne,pn %icc, 1b
4873 @@ -118,7 +143,12 @@ __up_write:
4874 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
4875 1:
4876 lduw [%o0], %g3
4877 - sub %g3, %g1, %g7
4878 + subcc %g3, %g1, %g7
4879 +
4880 +#ifdef CONFIG_PAX_REFCOUNT
4881 + tvs %icc, 6
4882 +#endif
4883 +
4884 cas [%o0], %g3, %g7
4885 cmp %g3, %g7
4886 bne,pn %icc, 1b
4887 @@ -143,7 +173,12 @@ __downgrade_write:
4888 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
4889 1:
4890 lduw [%o0], %g3
4891 - sub %g3, %g1, %g7
4892 + subcc %g3, %g1, %g7
4893 +
4894 +#ifdef CONFIG_PAX_REFCOUNT
4895 + tvs %icc, 6
4896 +#endif
4897 +
4898 cas [%o0], %g3, %g7
4899 cmp %g3, %g7
4900 bne,pn %icc, 1b
4901 diff -urNp linux-2.6.32.42/arch/sparc/Makefile linux-2.6.32.42/arch/sparc/Makefile
4902 --- linux-2.6.32.42/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
4903 +++ linux-2.6.32.42/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
4904 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4905 # Export what is needed by arch/sparc/boot/Makefile
4906 export VMLINUX_INIT VMLINUX_MAIN
4907 VMLINUX_INIT := $(head-y) $(init-y)
4908 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4909 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4910 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4911 VMLINUX_MAIN += $(drivers-y) $(net-y)
4912
4913 diff -urNp linux-2.6.32.42/arch/sparc/mm/fault_32.c linux-2.6.32.42/arch/sparc/mm/fault_32.c
4914 --- linux-2.6.32.42/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
4915 +++ linux-2.6.32.42/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
4916 @@ -21,6 +21,9 @@
4917 #include <linux/interrupt.h>
4918 #include <linux/module.h>
4919 #include <linux/kdebug.h>
4920 +#include <linux/slab.h>
4921 +#include <linux/pagemap.h>
4922 +#include <linux/compiler.h>
4923
4924 #include <asm/system.h>
4925 #include <asm/page.h>
4926 @@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
4927 return safe_compute_effective_address(regs, insn);
4928 }
4929
4930 +#ifdef CONFIG_PAX_PAGEEXEC
4931 +#ifdef CONFIG_PAX_DLRESOLVE
4932 +static void pax_emuplt_close(struct vm_area_struct *vma)
4933 +{
4934 + vma->vm_mm->call_dl_resolve = 0UL;
4935 +}
4936 +
4937 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4938 +{
4939 + unsigned int *kaddr;
4940 +
4941 + vmf->page = alloc_page(GFP_HIGHUSER);
4942 + if (!vmf->page)
4943 + return VM_FAULT_OOM;
4944 +
4945 + kaddr = kmap(vmf->page);
4946 + memset(kaddr, 0, PAGE_SIZE);
4947 + kaddr[0] = 0x9DE3BFA8U; /* save */
4948 + flush_dcache_page(vmf->page);
4949 + kunmap(vmf->page);
4950 + return VM_FAULT_MAJOR;
4951 +}
4952 +
4953 +static const struct vm_operations_struct pax_vm_ops = {
4954 + .close = pax_emuplt_close,
4955 + .fault = pax_emuplt_fault
4956 +};
4957 +
4958 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4959 +{
4960 + int ret;
4961 +
4962 + vma->vm_mm = current->mm;
4963 + vma->vm_start = addr;
4964 + vma->vm_end = addr + PAGE_SIZE;
4965 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4966 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4967 + vma->vm_ops = &pax_vm_ops;
4968 +
4969 + ret = insert_vm_struct(current->mm, vma);
4970 + if (ret)
4971 + return ret;
4972 +
4973 + ++current->mm->total_vm;
4974 + return 0;
4975 +}
4976 +#endif
4977 +
4978 +/*
4979 + * PaX: decide what to do with offenders (regs->pc = fault address)
4980 + *
4981 + * returns 1 when task should be killed
4982 + * 2 when patched PLT trampoline was detected
4983 + * 3 when unpatched PLT trampoline was detected
4984 + */
4985 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4986 +{
4987 +
4988 +#ifdef CONFIG_PAX_EMUPLT
4989 + int err;
4990 +
4991 + do { /* PaX: patched PLT emulation #1 */
4992 + unsigned int sethi1, sethi2, jmpl;
4993 +
4994 + err = get_user(sethi1, (unsigned int *)regs->pc);
4995 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4996 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4997 +
4998 + if (err)
4999 + break;
5000 +
5001 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5002 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5003 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5004 + {
5005 + unsigned int addr;
5006 +
5007 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5008 + addr = regs->u_regs[UREG_G1];
5009 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5010 + regs->pc = addr;
5011 + regs->npc = addr+4;
5012 + return 2;
5013 + }
5014 + } while (0);
5015 +
5016 + { /* PaX: patched PLT emulation #2 */
5017 + unsigned int ba;
5018 +
5019 + err = get_user(ba, (unsigned int *)regs->pc);
5020 +
5021 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5022 + unsigned int addr;
5023 +
5024 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5025 + regs->pc = addr;
5026 + regs->npc = addr+4;
5027 + return 2;
5028 + }
5029 + }
5030 +
5031 + do { /* PaX: patched PLT emulation #3 */
5032 + unsigned int sethi, jmpl, nop;
5033 +
5034 + err = get_user(sethi, (unsigned int *)regs->pc);
5035 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5036 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5037 +
5038 + if (err)
5039 + break;
5040 +
5041 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5042 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5043 + nop == 0x01000000U)
5044 + {
5045 + unsigned int addr;
5046 +
5047 + addr = (sethi & 0x003FFFFFU) << 10;
5048 + regs->u_regs[UREG_G1] = addr;
5049 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5050 + regs->pc = addr;
5051 + regs->npc = addr+4;
5052 + return 2;
5053 + }
5054 + } while (0);
5055 +
5056 + do { /* PaX: unpatched PLT emulation step 1 */
5057 + unsigned int sethi, ba, nop;
5058 +
5059 + err = get_user(sethi, (unsigned int *)regs->pc);
5060 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
5061 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
5062 +
5063 + if (err)
5064 + break;
5065 +
5066 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5067 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5068 + nop == 0x01000000U)
5069 + {
5070 + unsigned int addr, save, call;
5071 +
5072 + if ((ba & 0xFFC00000U) == 0x30800000U)
5073 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5074 + else
5075 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5076 +
5077 + err = get_user(save, (unsigned int *)addr);
5078 + err |= get_user(call, (unsigned int *)(addr+4));
5079 + err |= get_user(nop, (unsigned int *)(addr+8));
5080 + if (err)
5081 + break;
5082 +
5083 +#ifdef CONFIG_PAX_DLRESOLVE
5084 + if (save == 0x9DE3BFA8U &&
5085 + (call & 0xC0000000U) == 0x40000000U &&
5086 + nop == 0x01000000U)
5087 + {
5088 + struct vm_area_struct *vma;
5089 + unsigned long call_dl_resolve;
5090 +
5091 + down_read(&current->mm->mmap_sem);
5092 + call_dl_resolve = current->mm->call_dl_resolve;
5093 + up_read(&current->mm->mmap_sem);
5094 + if (likely(call_dl_resolve))
5095 + goto emulate;
5096 +
5097 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5098 +
5099 + down_write(&current->mm->mmap_sem);
5100 + if (current->mm->call_dl_resolve) {
5101 + call_dl_resolve = current->mm->call_dl_resolve;
5102 + up_write(&current->mm->mmap_sem);
5103 + if (vma)
5104 + kmem_cache_free(vm_area_cachep, vma);
5105 + goto emulate;
5106 + }
5107 +
5108 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5109 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5110 + up_write(&current->mm->mmap_sem);
5111 + if (vma)
5112 + kmem_cache_free(vm_area_cachep, vma);
5113 + return 1;
5114 + }
5115 +
5116 + if (pax_insert_vma(vma, call_dl_resolve)) {
5117 + up_write(&current->mm->mmap_sem);
5118 + kmem_cache_free(vm_area_cachep, vma);
5119 + return 1;
5120 + }
5121 +
5122 + current->mm->call_dl_resolve = call_dl_resolve;
5123 + up_write(&current->mm->mmap_sem);
5124 +
5125 +emulate:
5126 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5127 + regs->pc = call_dl_resolve;
5128 + regs->npc = addr+4;
5129 + return 3;
5130 + }
5131 +#endif
5132 +
5133 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5134 + if ((save & 0xFFC00000U) == 0x05000000U &&
5135 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5136 + nop == 0x01000000U)
5137 + {
5138 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5139 + regs->u_regs[UREG_G2] = addr + 4;
5140 + addr = (save & 0x003FFFFFU) << 10;
5141 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5142 + regs->pc = addr;
5143 + regs->npc = addr+4;
5144 + return 3;
5145 + }
5146 + }
5147 + } while (0);
5148 +
5149 + do { /* PaX: unpatched PLT emulation step 2 */
5150 + unsigned int save, call, nop;
5151 +
5152 + err = get_user(save, (unsigned int *)(regs->pc-4));
5153 + err |= get_user(call, (unsigned int *)regs->pc);
5154 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
5155 + if (err)
5156 + break;
5157 +
5158 + if (save == 0x9DE3BFA8U &&
5159 + (call & 0xC0000000U) == 0x40000000U &&
5160 + nop == 0x01000000U)
5161 + {
5162 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5163 +
5164 + regs->u_regs[UREG_RETPC] = regs->pc;
5165 + regs->pc = dl_resolve;
5166 + regs->npc = dl_resolve+4;
5167 + return 3;
5168 + }
5169 + } while (0);
5170 +#endif
5171 +
5172 + return 1;
5173 +}
5174 +
5175 +void pax_report_insns(void *pc, void *sp)
5176 +{
5177 + unsigned long i;
5178 +
5179 + printk(KERN_ERR "PAX: bytes at PC: ");
5180 + for (i = 0; i < 8; i++) {
5181 + unsigned int c;
5182 + if (get_user(c, (unsigned int *)pc+i))
5183 + printk(KERN_CONT "???????? ");
5184 + else
5185 + printk(KERN_CONT "%08x ", c);
5186 + }
5187 + printk("\n");
5188 +}
5189 +#endif
5190 +
5191 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5192 unsigned long address)
5193 {
5194 @@ -231,6 +495,24 @@ good_area:
5195 if(!(vma->vm_flags & VM_WRITE))
5196 goto bad_area;
5197 } else {
5198 +
5199 +#ifdef CONFIG_PAX_PAGEEXEC
5200 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5201 + up_read(&mm->mmap_sem);
5202 + switch (pax_handle_fetch_fault(regs)) {
5203 +
5204 +#ifdef CONFIG_PAX_EMUPLT
5205 + case 2:
5206 + case 3:
5207 + return;
5208 +#endif
5209 +
5210 + }
5211 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5212 + do_group_exit(SIGKILL);
5213 + }
5214 +#endif
5215 +
5216 /* Allow reads even for write-only mappings */
5217 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5218 goto bad_area;
5219 diff -urNp linux-2.6.32.42/arch/sparc/mm/fault_64.c linux-2.6.32.42/arch/sparc/mm/fault_64.c
5220 --- linux-2.6.32.42/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5221 +++ linux-2.6.32.42/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5222 @@ -20,6 +20,9 @@
5223 #include <linux/kprobes.h>
5224 #include <linux/kdebug.h>
5225 #include <linux/percpu.h>
5226 +#include <linux/slab.h>
5227 +#include <linux/pagemap.h>
5228 +#include <linux/compiler.h>
5229
5230 #include <asm/page.h>
5231 #include <asm/pgtable.h>
5232 @@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5233 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5234 regs->tpc);
5235 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5236 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5237 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5238 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5239 dump_stack();
5240 unhandled_fault(regs->tpc, current, regs);
5241 @@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5242 show_regs(regs);
5243 }
5244
5245 +#ifdef CONFIG_PAX_PAGEEXEC
5246 +#ifdef CONFIG_PAX_DLRESOLVE
5247 +static void pax_emuplt_close(struct vm_area_struct *vma)
5248 +{
5249 + vma->vm_mm->call_dl_resolve = 0UL;
5250 +}
5251 +
5252 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5253 +{
5254 + unsigned int *kaddr;
5255 +
5256 + vmf->page = alloc_page(GFP_HIGHUSER);
5257 + if (!vmf->page)
5258 + return VM_FAULT_OOM;
5259 +
5260 + kaddr = kmap(vmf->page);
5261 + memset(kaddr, 0, PAGE_SIZE);
5262 + kaddr[0] = 0x9DE3BFA8U; /* save */
5263 + flush_dcache_page(vmf->page);
5264 + kunmap(vmf->page);
5265 + return VM_FAULT_MAJOR;
5266 +}
5267 +
5268 +static const struct vm_operations_struct pax_vm_ops = {
5269 + .close = pax_emuplt_close,
5270 + .fault = pax_emuplt_fault
5271 +};
5272 +
5273 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5274 +{
5275 + int ret;
5276 +
5277 + vma->vm_mm = current->mm;
5278 + vma->vm_start = addr;
5279 + vma->vm_end = addr + PAGE_SIZE;
5280 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5281 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5282 + vma->vm_ops = &pax_vm_ops;
5283 +
5284 + ret = insert_vm_struct(current->mm, vma);
5285 + if (ret)
5286 + return ret;
5287 +
5288 + ++current->mm->total_vm;
5289 + return 0;
5290 +}
5291 +#endif
5292 +
5293 +/*
5294 + * PaX: decide what to do with offenders (regs->tpc = fault address)
5295 + *
5296 + * returns 1 when task should be killed
5297 + * 2 when patched PLT trampoline was detected
5298 + * 3 when unpatched PLT trampoline was detected
5299 + */
5300 +static int pax_handle_fetch_fault(struct pt_regs *regs)
5301 +{
5302 +
5303 +#ifdef CONFIG_PAX_EMUPLT
5304 + int err;
5305 +
5306 + do { /* PaX: patched PLT emulation #1 */
5307 + unsigned int sethi1, sethi2, jmpl;
5308 +
5309 + err = get_user(sethi1, (unsigned int *)regs->tpc);
5310 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5311 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5312 +
5313 + if (err)
5314 + break;
5315 +
5316 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5317 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
5318 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
5319 + {
5320 + unsigned long addr;
5321 +
5322 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5323 + addr = regs->u_regs[UREG_G1];
5324 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5325 +
5326 + if (test_thread_flag(TIF_32BIT))
5327 + addr &= 0xFFFFFFFFUL;
5328 +
5329 + regs->tpc = addr;
5330 + regs->tnpc = addr+4;
5331 + return 2;
5332 + }
5333 + } while (0);
5334 +
5335 + { /* PaX: patched PLT emulation #2 */
5336 + unsigned int ba;
5337 +
5338 + err = get_user(ba, (unsigned int *)regs->tpc);
5339 +
5340 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5341 + unsigned long addr;
5342 +
5343 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5344 +
5345 + if (test_thread_flag(TIF_32BIT))
5346 + addr &= 0xFFFFFFFFUL;
5347 +
5348 + regs->tpc = addr;
5349 + regs->tnpc = addr+4;
5350 + return 2;
5351 + }
5352 + }
5353 +
5354 + do { /* PaX: patched PLT emulation #3 */
5355 + unsigned int sethi, jmpl, nop;
5356 +
5357 + err = get_user(sethi, (unsigned int *)regs->tpc);
5358 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5359 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5360 +
5361 + if (err)
5362 + break;
5363 +
5364 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5365 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5366 + nop == 0x01000000U)
5367 + {
5368 + unsigned long addr;
5369 +
5370 + addr = (sethi & 0x003FFFFFU) << 10;
5371 + regs->u_regs[UREG_G1] = addr;
5372 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5373 +
5374 + if (test_thread_flag(TIF_32BIT))
5375 + addr &= 0xFFFFFFFFUL;
5376 +
5377 + regs->tpc = addr;
5378 + regs->tnpc = addr+4;
5379 + return 2;
5380 + }
5381 + } while (0);
5382 +
5383 + do { /* PaX: patched PLT emulation #4 */
5384 + unsigned int sethi, mov1, call, mov2;
5385 +
5386 + err = get_user(sethi, (unsigned int *)regs->tpc);
5387 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5388 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
5389 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5390 +
5391 + if (err)
5392 + break;
5393 +
5394 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5395 + mov1 == 0x8210000FU &&
5396 + (call & 0xC0000000U) == 0x40000000U &&
5397 + mov2 == 0x9E100001U)
5398 + {
5399 + unsigned long addr;
5400 +
5401 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5402 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5403 +
5404 + if (test_thread_flag(TIF_32BIT))
5405 + addr &= 0xFFFFFFFFUL;
5406 +
5407 + regs->tpc = addr;
5408 + regs->tnpc = addr+4;
5409 + return 2;
5410 + }
5411 + } while (0);
5412 +
5413 + do { /* PaX: patched PLT emulation #5 */
5414 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5415 +
5416 + err = get_user(sethi, (unsigned int *)regs->tpc);
5417 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5418 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5419 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5420 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5421 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5422 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5423 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5424 +
5425 + if (err)
5426 + break;
5427 +
5428 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5429 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5430 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5431 + (or1 & 0xFFFFE000U) == 0x82106000U &&
5432 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5433 + sllx == 0x83287020U &&
5434 + jmpl == 0x81C04005U &&
5435 + nop == 0x01000000U)
5436 + {
5437 + unsigned long addr;
5438 +
5439 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5440 + regs->u_regs[UREG_G1] <<= 32;
5441 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5442 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5443 + regs->tpc = addr;
5444 + regs->tnpc = addr+4;
5445 + return 2;
5446 + }
5447 + } while (0);
5448 +
5449 + do { /* PaX: patched PLT emulation #6 */
5450 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5451 +
5452 + err = get_user(sethi, (unsigned int *)regs->tpc);
5453 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5454 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5455 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5456 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
5457 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5458 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5459 +
5460 + if (err)
5461 + break;
5462 +
5463 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5464 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
5465 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5466 + sllx == 0x83287020U &&
5467 + (or & 0xFFFFE000U) == 0x8A116000U &&
5468 + jmpl == 0x81C04005U &&
5469 + nop == 0x01000000U)
5470 + {
5471 + unsigned long addr;
5472 +
5473 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5474 + regs->u_regs[UREG_G1] <<= 32;
5475 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5476 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5477 + regs->tpc = addr;
5478 + regs->tnpc = addr+4;
5479 + return 2;
5480 + }
5481 + } while (0);
5482 +
5483 + do { /* PaX: unpatched PLT emulation step 1 */
5484 + unsigned int sethi, ba, nop;
5485 +
5486 + err = get_user(sethi, (unsigned int *)regs->tpc);
5487 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5488 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5489 +
5490 + if (err)
5491 + break;
5492 +
5493 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5494 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5495 + nop == 0x01000000U)
5496 + {
5497 + unsigned long addr;
5498 + unsigned int save, call;
5499 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5500 +
5501 + if ((ba & 0xFFC00000U) == 0x30800000U)
5502 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5503 + else
5504 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5505 +
5506 + if (test_thread_flag(TIF_32BIT))
5507 + addr &= 0xFFFFFFFFUL;
5508 +
5509 + err = get_user(save, (unsigned int *)addr);
5510 + err |= get_user(call, (unsigned int *)(addr+4));
5511 + err |= get_user(nop, (unsigned int *)(addr+8));
5512 + if (err)
5513 + break;
5514 +
5515 +#ifdef CONFIG_PAX_DLRESOLVE
5516 + if (save == 0x9DE3BFA8U &&
5517 + (call & 0xC0000000U) == 0x40000000U &&
5518 + nop == 0x01000000U)
5519 + {
5520 + struct vm_area_struct *vma;
5521 + unsigned long call_dl_resolve;
5522 +
5523 + down_read(&current->mm->mmap_sem);
5524 + call_dl_resolve = current->mm->call_dl_resolve;
5525 + up_read(&current->mm->mmap_sem);
5526 + if (likely(call_dl_resolve))
5527 + goto emulate;
5528 +
5529 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5530 +
5531 + down_write(&current->mm->mmap_sem);
5532 + if (current->mm->call_dl_resolve) {
5533 + call_dl_resolve = current->mm->call_dl_resolve;
5534 + up_write(&current->mm->mmap_sem);
5535 + if (vma)
5536 + kmem_cache_free(vm_area_cachep, vma);
5537 + goto emulate;
5538 + }
5539 +
5540 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5541 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5542 + up_write(&current->mm->mmap_sem);
5543 + if (vma)
5544 + kmem_cache_free(vm_area_cachep, vma);
5545 + return 1;
5546 + }
5547 +
5548 + if (pax_insert_vma(vma, call_dl_resolve)) {
5549 + up_write(&current->mm->mmap_sem);
5550 + kmem_cache_free(vm_area_cachep, vma);
5551 + return 1;
5552 + }
5553 +
5554 + current->mm->call_dl_resolve = call_dl_resolve;
5555 + up_write(&current->mm->mmap_sem);
5556 +
5557 +emulate:
5558 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5559 + regs->tpc = call_dl_resolve;
5560 + regs->tnpc = addr+4;
5561 + return 3;
5562 + }
5563 +#endif
5564 +
5565 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5566 + if ((save & 0xFFC00000U) == 0x05000000U &&
5567 + (call & 0xFFFFE000U) == 0x85C0A000U &&
5568 + nop == 0x01000000U)
5569 + {
5570 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5571 + regs->u_regs[UREG_G2] = addr + 4;
5572 + addr = (save & 0x003FFFFFU) << 10;
5573 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5574 +
5575 + if (test_thread_flag(TIF_32BIT))
5576 + addr &= 0xFFFFFFFFUL;
5577 +
5578 + regs->tpc = addr;
5579 + regs->tnpc = addr+4;
5580 + return 3;
5581 + }
5582 +
5583 + /* PaX: 64-bit PLT stub */
5584 + err = get_user(sethi1, (unsigned int *)addr);
5585 + err |= get_user(sethi2, (unsigned int *)(addr+4));
5586 + err |= get_user(or1, (unsigned int *)(addr+8));
5587 + err |= get_user(or2, (unsigned int *)(addr+12));
5588 + err |= get_user(sllx, (unsigned int *)(addr+16));
5589 + err |= get_user(add, (unsigned int *)(addr+20));
5590 + err |= get_user(jmpl, (unsigned int *)(addr+24));
5591 + err |= get_user(nop, (unsigned int *)(addr+28));
5592 + if (err)
5593 + break;
5594 +
5595 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5596 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5597 + (or1 & 0xFFFFE000U) == 0x88112000U &&
5598 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
5599 + sllx == 0x89293020U &&
5600 + add == 0x8A010005U &&
5601 + jmpl == 0x89C14000U &&
5602 + nop == 0x01000000U)
5603 + {
5604 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5605 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5606 + regs->u_regs[UREG_G4] <<= 32;
5607 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5608 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5609 + regs->u_regs[UREG_G4] = addr + 24;
5610 + addr = regs->u_regs[UREG_G5];
5611 + regs->tpc = addr;
5612 + regs->tnpc = addr+4;
5613 + return 3;
5614 + }
5615 + }
5616 + } while (0);
5617 +
5618 +#ifdef CONFIG_PAX_DLRESOLVE
5619 + do { /* PaX: unpatched PLT emulation step 2 */
5620 + unsigned int save, call, nop;
5621 +
5622 + err = get_user(save, (unsigned int *)(regs->tpc-4));
5623 + err |= get_user(call, (unsigned int *)regs->tpc);
5624 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5625 + if (err)
5626 + break;
5627 +
5628 + if (save == 0x9DE3BFA8U &&
5629 + (call & 0xC0000000U) == 0x40000000U &&
5630 + nop == 0x01000000U)
5631 + {
5632 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5633 +
5634 + if (test_thread_flag(TIF_32BIT))
5635 + dl_resolve &= 0xFFFFFFFFUL;
5636 +
5637 + regs->u_regs[UREG_RETPC] = regs->tpc;
5638 + regs->tpc = dl_resolve;
5639 + regs->tnpc = dl_resolve+4;
5640 + return 3;
5641 + }
5642 + } while (0);
5643 +#endif
5644 +
5645 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5646 + unsigned int sethi, ba, nop;
5647 +
5648 + err = get_user(sethi, (unsigned int *)regs->tpc);
5649 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5650 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5651 +
5652 + if (err)
5653 + break;
5654 +
5655 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
5656 + (ba & 0xFFF00000U) == 0x30600000U &&
5657 + nop == 0x01000000U)
5658 + {
5659 + unsigned long addr;
5660 +
5661 + addr = (sethi & 0x003FFFFFU) << 10;
5662 + regs->u_regs[UREG_G1] = addr;
5663 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5664 +
5665 + if (test_thread_flag(TIF_32BIT))
5666 + addr &= 0xFFFFFFFFUL;
5667 +
5668 + regs->tpc = addr;
5669 + regs->tnpc = addr+4;
5670 + return 2;
5671 + }
5672 + } while (0);
5673 +
5674 +#endif
5675 +
5676 + return 1;
5677 +}
5678 +
5679 +void pax_report_insns(void *pc, void *sp)
5680 +{
5681 + unsigned long i;
5682 +
5683 + printk(KERN_ERR "PAX: bytes at PC: ");
5684 + for (i = 0; i < 8; i++) {
5685 + unsigned int c;
5686 + if (get_user(c, (unsigned int *)pc+i))
5687 + printk(KERN_CONT "???????? ");
5688 + else
5689 + printk(KERN_CONT "%08x ", c);
5690 + }
5691 + printk("\n");
5692 +}
5693 +#endif
5694 +
5695 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5696 {
5697 struct mm_struct *mm = current->mm;
5698 @@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5699 if (!vma)
5700 goto bad_area;
5701
5702 +#ifdef CONFIG_PAX_PAGEEXEC
5703 + /* PaX: detect ITLB misses on non-exec pages */
5704 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5705 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5706 + {
5707 + if (address != regs->tpc)
5708 + goto good_area;
5709 +
5710 + up_read(&mm->mmap_sem);
5711 + switch (pax_handle_fetch_fault(regs)) {
5712 +
5713 +#ifdef CONFIG_PAX_EMUPLT
5714 + case 2:
5715 + case 3:
5716 + return;
5717 +#endif
5718 +
5719 + }
5720 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5721 + do_group_exit(SIGKILL);
5722 + }
5723 +#endif
5724 +
5725 /* Pure DTLB misses do not tell us whether the fault causing
5726 * load/store/atomic was a write or not, it only says that there
5727 * was no match. So in such a case we (carefully) read the
5728 diff -urNp linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c
5729 --- linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
5730 +++ linux-2.6.32.42/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
5731 @@ -69,7 +69,7 @@ full_search:
5732 }
5733 return -ENOMEM;
5734 }
5735 - if (likely(!vma || addr + len <= vma->vm_start)) {
5736 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5737 /*
5738 * Remember the place where we stopped the search:
5739 */
5740 @@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
5741 /* make sure it can fit in the remaining address space */
5742 if (likely(addr > len)) {
5743 vma = find_vma(mm, addr-len);
5744 - if (!vma || addr <= vma->vm_start) {
5745 + if (check_heap_stack_gap(vma, addr - len, len)) {
5746 /* remember the address as a hint for next time */
5747 return (mm->free_area_cache = addr-len);
5748 }
5749 @@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
5750 if (unlikely(mm->mmap_base < len))
5751 goto bottomup;
5752
5753 - addr = (mm->mmap_base-len) & HPAGE_MASK;
5754 + addr = mm->mmap_base - len;
5755
5756 do {
5757 + addr &= HPAGE_MASK;
5758 /*
5759 * Lookup failure means no vma is above this address,
5760 * else if new region fits below vma->vm_start,
5761 * return with success:
5762 */
5763 vma = find_vma(mm, addr);
5764 - if (likely(!vma || addr+len <= vma->vm_start)) {
5765 + if (likely(check_heap_stack_gap(vma, addr, len))) {
5766 /* remember the address as a hint for next time */
5767 return (mm->free_area_cache = addr);
5768 }
5769 @@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
5770 mm->cached_hole_size = vma->vm_start - addr;
5771
5772 /* try just below the current vma->vm_start */
5773 - addr = (vma->vm_start-len) & HPAGE_MASK;
5774 - } while (likely(len < vma->vm_start));
5775 + addr = skip_heap_stack_gap(vma, len);
5776 + } while (!IS_ERR_VALUE(addr));
5777
5778 bottomup:
5779 /*
5780 @@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
5781 if (addr) {
5782 addr = ALIGN(addr, HPAGE_SIZE);
5783 vma = find_vma(mm, addr);
5784 - if (task_size - len >= addr &&
5785 - (!vma || addr + len <= vma->vm_start))
5786 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5787 return addr;
5788 }
5789 if (mm->get_unmapped_area == arch_get_unmapped_area)
5790 diff -urNp linux-2.6.32.42/arch/sparc/mm/init_32.c linux-2.6.32.42/arch/sparc/mm/init_32.c
5791 --- linux-2.6.32.42/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
5792 +++ linux-2.6.32.42/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
5793 @@ -317,6 +317,9 @@ extern void device_scan(void);
5794 pgprot_t PAGE_SHARED __read_mostly;
5795 EXPORT_SYMBOL(PAGE_SHARED);
5796
5797 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5798 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5799 +
5800 void __init paging_init(void)
5801 {
5802 switch(sparc_cpu_model) {
5803 @@ -345,17 +348,17 @@ void __init paging_init(void)
5804
5805 /* Initialize the protection map with non-constant, MMU dependent values. */
5806 protection_map[0] = PAGE_NONE;
5807 - protection_map[1] = PAGE_READONLY;
5808 - protection_map[2] = PAGE_COPY;
5809 - protection_map[3] = PAGE_COPY;
5810 + protection_map[1] = PAGE_READONLY_NOEXEC;
5811 + protection_map[2] = PAGE_COPY_NOEXEC;
5812 + protection_map[3] = PAGE_COPY_NOEXEC;
5813 protection_map[4] = PAGE_READONLY;
5814 protection_map[5] = PAGE_READONLY;
5815 protection_map[6] = PAGE_COPY;
5816 protection_map[7] = PAGE_COPY;
5817 protection_map[8] = PAGE_NONE;
5818 - protection_map[9] = PAGE_READONLY;
5819 - protection_map[10] = PAGE_SHARED;
5820 - protection_map[11] = PAGE_SHARED;
5821 + protection_map[9] = PAGE_READONLY_NOEXEC;
5822 + protection_map[10] = PAGE_SHARED_NOEXEC;
5823 + protection_map[11] = PAGE_SHARED_NOEXEC;
5824 protection_map[12] = PAGE_READONLY;
5825 protection_map[13] = PAGE_READONLY;
5826 protection_map[14] = PAGE_SHARED;
5827 diff -urNp linux-2.6.32.42/arch/sparc/mm/Makefile linux-2.6.32.42/arch/sparc/mm/Makefile
5828 --- linux-2.6.32.42/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
5829 +++ linux-2.6.32.42/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
5830 @@ -2,7 +2,7 @@
5831 #
5832
5833 asflags-y := -ansi
5834 -ccflags-y := -Werror
5835 +#ccflags-y := -Werror
5836
5837 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5838 obj-y += fault_$(BITS).o
5839 diff -urNp linux-2.6.32.42/arch/sparc/mm/srmmu.c linux-2.6.32.42/arch/sparc/mm/srmmu.c
5840 --- linux-2.6.32.42/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
5841 +++ linux-2.6.32.42/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
5842 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5843 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5844 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5845 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5846 +
5847 +#ifdef CONFIG_PAX_PAGEEXEC
5848 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5849 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5850 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5851 +#endif
5852 +
5853 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5854 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5855
5856 diff -urNp linux-2.6.32.42/arch/um/include/asm/kmap_types.h linux-2.6.32.42/arch/um/include/asm/kmap_types.h
5857 --- linux-2.6.32.42/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
5858 +++ linux-2.6.32.42/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
5859 @@ -23,6 +23,7 @@ enum km_type {
5860 KM_IRQ1,
5861 KM_SOFTIRQ0,
5862 KM_SOFTIRQ1,
5863 + KM_CLEARPAGE,
5864 KM_TYPE_NR
5865 };
5866
5867 diff -urNp linux-2.6.32.42/arch/um/include/asm/page.h linux-2.6.32.42/arch/um/include/asm/page.h
5868 --- linux-2.6.32.42/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
5869 +++ linux-2.6.32.42/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
5870 @@ -14,6 +14,9 @@
5871 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5872 #define PAGE_MASK (~(PAGE_SIZE-1))
5873
5874 +#define ktla_ktva(addr) (addr)
5875 +#define ktva_ktla(addr) (addr)
5876 +
5877 #ifndef __ASSEMBLY__
5878
5879 struct page;
5880 diff -urNp linux-2.6.32.42/arch/um/kernel/process.c linux-2.6.32.42/arch/um/kernel/process.c
5881 --- linux-2.6.32.42/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
5882 +++ linux-2.6.32.42/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
5883 @@ -393,22 +393,6 @@ int singlestepping(void * t)
5884 return 2;
5885 }
5886
5887 -/*
5888 - * Only x86 and x86_64 have an arch_align_stack().
5889 - * All other arches have "#define arch_align_stack(x) (x)"
5890 - * in their asm/system.h
5891 - * As this is included in UML from asm-um/system-generic.h,
5892 - * we can use it to behave as the subarch does.
5893 - */
5894 -#ifndef arch_align_stack
5895 -unsigned long arch_align_stack(unsigned long sp)
5896 -{
5897 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5898 - sp -= get_random_int() % 8192;
5899 - return sp & ~0xf;
5900 -}
5901 -#endif
5902 -
5903 unsigned long get_wchan(struct task_struct *p)
5904 {
5905 unsigned long stack_page, sp, ip;
5906 diff -urNp linux-2.6.32.42/arch/um/sys-i386/syscalls.c linux-2.6.32.42/arch/um/sys-i386/syscalls.c
5907 --- linux-2.6.32.42/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
5908 +++ linux-2.6.32.42/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
5909 @@ -11,6 +11,21 @@
5910 #include "asm/uaccess.h"
5911 #include "asm/unistd.h"
5912
5913 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5914 +{
5915 + unsigned long pax_task_size = TASK_SIZE;
5916 +
5917 +#ifdef CONFIG_PAX_SEGMEXEC
5918 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5919 + pax_task_size = SEGMEXEC_TASK_SIZE;
5920 +#endif
5921 +
5922 + if (len > pax_task_size || addr > pax_task_size - len)
5923 + return -EINVAL;
5924 +
5925 + return 0;
5926 +}
5927 +
5928 /*
5929 * Perform the select(nd, in, out, ex, tv) and mmap() system
5930 * calls. Linux/i386 didn't use to be able to handle more than
5931 diff -urNp linux-2.6.32.42/arch/x86/boot/bitops.h linux-2.6.32.42/arch/x86/boot/bitops.h
5932 --- linux-2.6.32.42/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
5933 +++ linux-2.6.32.42/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
5934 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5935 u8 v;
5936 const u32 *p = (const u32 *)addr;
5937
5938 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5939 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5940 return v;
5941 }
5942
5943 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5944
5945 static inline void set_bit(int nr, void *addr)
5946 {
5947 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5948 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5949 }
5950
5951 #endif /* BOOT_BITOPS_H */
5952 diff -urNp linux-2.6.32.42/arch/x86/boot/boot.h linux-2.6.32.42/arch/x86/boot/boot.h
5953 --- linux-2.6.32.42/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
5954 +++ linux-2.6.32.42/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
5955 @@ -82,7 +82,7 @@ static inline void io_delay(void)
5956 static inline u16 ds(void)
5957 {
5958 u16 seg;
5959 - asm("movw %%ds,%0" : "=rm" (seg));
5960 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5961 return seg;
5962 }
5963
5964 @@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
5965 static inline int memcmp(const void *s1, const void *s2, size_t len)
5966 {
5967 u8 diff;
5968 - asm("repe; cmpsb; setnz %0"
5969 + asm volatile("repe; cmpsb; setnz %0"
5970 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5971 return diff;
5972 }
5973 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/head_32.S linux-2.6.32.42/arch/x86/boot/compressed/head_32.S
5974 --- linux-2.6.32.42/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
5975 +++ linux-2.6.32.42/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
5976 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5977 notl %eax
5978 andl %eax, %ebx
5979 #else
5980 - movl $LOAD_PHYSICAL_ADDR, %ebx
5981 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5982 #endif
5983
5984 /* Target address to relocate to for decompression */
5985 @@ -149,7 +149,7 @@ relocated:
5986 * and where it was actually loaded.
5987 */
5988 movl %ebp, %ebx
5989 - subl $LOAD_PHYSICAL_ADDR, %ebx
5990 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5991 jz 2f /* Nothing to be done if loaded at compiled addr. */
5992 /*
5993 * Process relocations.
5994 @@ -157,8 +157,7 @@ relocated:
5995
5996 1: subl $4, %edi
5997 movl (%edi), %ecx
5998 - testl %ecx, %ecx
5999 - jz 2f
6000 + jecxz 2f
6001 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6002 jmp 1b
6003 2:
6004 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/head_64.S linux-2.6.32.42/arch/x86/boot/compressed/head_64.S
6005 --- linux-2.6.32.42/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6006 +++ linux-2.6.32.42/arch/x86/boot/compressed/head_64.S 2011-04-17 15:56:46.000000000 -0400
6007 @@ -91,7 +91,7 @@ ENTRY(startup_32)
6008 notl %eax
6009 andl %eax, %ebx
6010 #else
6011 - movl $LOAD_PHYSICAL_ADDR, %ebx
6012 + movl $____LOAD_PHYSICAL_ADDR, %ebx
6013 #endif
6014
6015 /* Target address to relocate to for decompression */
6016 @@ -234,7 +234,7 @@ ENTRY(startup_64)
6017 notq %rax
6018 andq %rax, %rbp
6019 #else
6020 - movq $LOAD_PHYSICAL_ADDR, %rbp
6021 + movq $____LOAD_PHYSICAL_ADDR, %rbp
6022 #endif
6023
6024 /* Target address to relocate to for decompression */
6025 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/misc.c linux-2.6.32.42/arch/x86/boot/compressed/misc.c
6026 --- linux-2.6.32.42/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6027 +++ linux-2.6.32.42/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6028 @@ -288,7 +288,7 @@ static void parse_elf(void *output)
6029 case PT_LOAD:
6030 #ifdef CONFIG_RELOCATABLE
6031 dest = output;
6032 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6033 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6034 #else
6035 dest = (void *)(phdr->p_paddr);
6036 #endif
6037 @@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6038 error("Destination address too large");
6039 #endif
6040 #ifndef CONFIG_RELOCATABLE
6041 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6042 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6043 error("Wrong destination address");
6044 #endif
6045
6046 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c
6047 --- linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6048 +++ linux-2.6.32.42/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6049 @@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6050
6051 offs = (olen > ilen) ? olen - ilen : 0;
6052 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6053 - offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6054 + offs += 64*1024; /* Add 64K bytes slack */
6055 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6056
6057 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6058 diff -urNp linux-2.6.32.42/arch/x86/boot/compressed/relocs.c linux-2.6.32.42/arch/x86/boot/compressed/relocs.c
6059 --- linux-2.6.32.42/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6060 +++ linux-2.6.32.42/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6061 @@ -10,8 +10,11 @@
6062 #define USE_BSD
6063 #include <endian.h>
6064
6065 +#include "../../../../include/linux/autoconf.h"
6066 +
6067 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6068 static Elf32_Ehdr ehdr;
6069 +static Elf32_Phdr *phdr;
6070 static unsigned long reloc_count, reloc_idx;
6071 static unsigned long *relocs;
6072
6073 @@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6074
6075 static int is_safe_abs_reloc(const char* sym_name)
6076 {
6077 - int i;
6078 + unsigned int i;
6079
6080 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6081 if (!strcmp(sym_name, safe_abs_relocs[i]))
6082 @@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6083 }
6084 }
6085
6086 +static void read_phdrs(FILE *fp)
6087 +{
6088 + unsigned int i;
6089 +
6090 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6091 + if (!phdr) {
6092 + die("Unable to allocate %d program headers\n",
6093 + ehdr.e_phnum);
6094 + }
6095 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6096 + die("Seek to %d failed: %s\n",
6097 + ehdr.e_phoff, strerror(errno));
6098 + }
6099 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6100 + die("Cannot read ELF program headers: %s\n",
6101 + strerror(errno));
6102 + }
6103 + for(i = 0; i < ehdr.e_phnum; i++) {
6104 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6105 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6106 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6107 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6108 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6109 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6110 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6111 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6112 + }
6113 +
6114 +}
6115 +
6116 static void read_shdrs(FILE *fp)
6117 {
6118 - int i;
6119 + unsigned int i;
6120 Elf32_Shdr shdr;
6121
6122 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6123 @@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6124
6125 static void read_strtabs(FILE *fp)
6126 {
6127 - int i;
6128 + unsigned int i;
6129 for (i = 0; i < ehdr.e_shnum; i++) {
6130 struct section *sec = &secs[i];
6131 if (sec->shdr.sh_type != SHT_STRTAB) {
6132 @@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6133
6134 static void read_symtabs(FILE *fp)
6135 {
6136 - int i,j;
6137 + unsigned int i,j;
6138 for (i = 0; i < ehdr.e_shnum; i++) {
6139 struct section *sec = &secs[i];
6140 if (sec->shdr.sh_type != SHT_SYMTAB) {
6141 @@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6142
6143 static void read_relocs(FILE *fp)
6144 {
6145 - int i,j;
6146 + unsigned int i,j;
6147 + uint32_t base;
6148 +
6149 for (i = 0; i < ehdr.e_shnum; i++) {
6150 struct section *sec = &secs[i];
6151 if (sec->shdr.sh_type != SHT_REL) {
6152 @@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6153 die("Cannot read symbol table: %s\n",
6154 strerror(errno));
6155 }
6156 + base = 0;
6157 + for (j = 0; j < ehdr.e_phnum; j++) {
6158 + if (phdr[j].p_type != PT_LOAD )
6159 + continue;
6160 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6161 + continue;
6162 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6163 + break;
6164 + }
6165 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6166 Elf32_Rel *rel = &sec->reltab[j];
6167 - rel->r_offset = elf32_to_cpu(rel->r_offset);
6168 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6169 rel->r_info = elf32_to_cpu(rel->r_info);
6170 }
6171 }
6172 @@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6173
6174 static void print_absolute_symbols(void)
6175 {
6176 - int i;
6177 + unsigned int i;
6178 printf("Absolute symbols\n");
6179 printf(" Num: Value Size Type Bind Visibility Name\n");
6180 for (i = 0; i < ehdr.e_shnum; i++) {
6181 struct section *sec = &secs[i];
6182 char *sym_strtab;
6183 Elf32_Sym *sh_symtab;
6184 - int j;
6185 + unsigned int j;
6186
6187 if (sec->shdr.sh_type != SHT_SYMTAB) {
6188 continue;
6189 @@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6190
6191 static void print_absolute_relocs(void)
6192 {
6193 - int i, printed = 0;
6194 + unsigned int i, printed = 0;
6195
6196 for (i = 0; i < ehdr.e_shnum; i++) {
6197 struct section *sec = &secs[i];
6198 struct section *sec_applies, *sec_symtab;
6199 char *sym_strtab;
6200 Elf32_Sym *sh_symtab;
6201 - int j;
6202 + unsigned int j;
6203 if (sec->shdr.sh_type != SHT_REL) {
6204 continue;
6205 }
6206 @@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6207
6208 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6209 {
6210 - int i;
6211 + unsigned int i;
6212 /* Walk through the relocations */
6213 for (i = 0; i < ehdr.e_shnum; i++) {
6214 char *sym_strtab;
6215 Elf32_Sym *sh_symtab;
6216 struct section *sec_applies, *sec_symtab;
6217 - int j;
6218 + unsigned int j;
6219 struct section *sec = &secs[i];
6220
6221 if (sec->shdr.sh_type != SHT_REL) {
6222 @@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6223 if (sym->st_shndx == SHN_ABS) {
6224 continue;
6225 }
6226 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6227 + if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6228 + continue;
6229 +
6230 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6231 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6232 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6233 + continue;
6234 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6235 + continue;
6236 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6237 + continue;
6238 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6239 + continue;
6240 +#endif
6241 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6242 /*
6243 * NONE can be ignored and and PC relative
6244 @@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6245
6246 static void emit_relocs(int as_text)
6247 {
6248 - int i;
6249 + unsigned int i;
6250 /* Count how many relocations I have and allocate space for them. */
6251 reloc_count = 0;
6252 walk_relocs(count_reloc);
6253 @@ -634,6 +693,7 @@ int main(int argc, char **argv)
6254 fname, strerror(errno));
6255 }
6256 read_ehdr(fp);
6257 + read_phdrs(fp);
6258 read_shdrs(fp);
6259 read_strtabs(fp);
6260 read_symtabs(fp);
6261 diff -urNp linux-2.6.32.42/arch/x86/boot/cpucheck.c linux-2.6.32.42/arch/x86/boot/cpucheck.c
6262 --- linux-2.6.32.42/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6263 +++ linux-2.6.32.42/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6264 @@ -74,7 +74,7 @@ static int has_fpu(void)
6265 u16 fcw = -1, fsw = -1;
6266 u32 cr0;
6267
6268 - asm("movl %%cr0,%0" : "=r" (cr0));
6269 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
6270 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6271 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6272 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6273 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6274 {
6275 u32 f0, f1;
6276
6277 - asm("pushfl ; "
6278 + asm volatile("pushfl ; "
6279 "pushfl ; "
6280 "popl %0 ; "
6281 "movl %0,%1 ; "
6282 @@ -115,7 +115,7 @@ static void get_flags(void)
6283 set_bit(X86_FEATURE_FPU, cpu.flags);
6284
6285 if (has_eflag(X86_EFLAGS_ID)) {
6286 - asm("cpuid"
6287 + asm volatile("cpuid"
6288 : "=a" (max_intel_level),
6289 "=b" (cpu_vendor[0]),
6290 "=d" (cpu_vendor[1]),
6291 @@ -124,7 +124,7 @@ static void get_flags(void)
6292
6293 if (max_intel_level >= 0x00000001 &&
6294 max_intel_level <= 0x0000ffff) {
6295 - asm("cpuid"
6296 + asm volatile("cpuid"
6297 : "=a" (tfms),
6298 "=c" (cpu.flags[4]),
6299 "=d" (cpu.flags[0])
6300 @@ -136,7 +136,7 @@ static void get_flags(void)
6301 cpu.model += ((tfms >> 16) & 0xf) << 4;
6302 }
6303
6304 - asm("cpuid"
6305 + asm volatile("cpuid"
6306 : "=a" (max_amd_level)
6307 : "a" (0x80000000)
6308 : "ebx", "ecx", "edx");
6309 @@ -144,7 +144,7 @@ static void get_flags(void)
6310 if (max_amd_level >= 0x80000001 &&
6311 max_amd_level <= 0x8000ffff) {
6312 u32 eax = 0x80000001;
6313 - asm("cpuid"
6314 + asm volatile("cpuid"
6315 : "+a" (eax),
6316 "=c" (cpu.flags[6]),
6317 "=d" (cpu.flags[1])
6318 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6319 u32 ecx = MSR_K7_HWCR;
6320 u32 eax, edx;
6321
6322 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6323 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6324 eax &= ~(1 << 15);
6325 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6326 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6327
6328 get_flags(); /* Make sure it really did something */
6329 err = check_flags();
6330 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6331 u32 ecx = MSR_VIA_FCR;
6332 u32 eax, edx;
6333
6334 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6335 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6336 eax |= (1<<1)|(1<<7);
6337 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6338 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6339
6340 set_bit(X86_FEATURE_CX8, cpu.flags);
6341 err = check_flags();
6342 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6343 u32 eax, edx;
6344 u32 level = 1;
6345
6346 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6347 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6348 - asm("cpuid"
6349 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6350 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6351 + asm volatile("cpuid"
6352 : "+a" (level), "=d" (cpu.flags[0])
6353 : : "ecx", "ebx");
6354 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6355 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6356
6357 err = check_flags();
6358 }
6359 diff -urNp linux-2.6.32.42/arch/x86/boot/header.S linux-2.6.32.42/arch/x86/boot/header.S
6360 --- linux-2.6.32.42/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6361 +++ linux-2.6.32.42/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6362 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6363 # single linked list of
6364 # struct setup_data
6365
6366 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6367 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6368
6369 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6370 #define VO_INIT_SIZE (VO__end - VO__text)
6371 diff -urNp linux-2.6.32.42/arch/x86/boot/memory.c linux-2.6.32.42/arch/x86/boot/memory.c
6372 --- linux-2.6.32.42/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6373 +++ linux-2.6.32.42/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6374 @@ -19,7 +19,7 @@
6375
6376 static int detect_memory_e820(void)
6377 {
6378 - int count = 0;
6379 + unsigned int count = 0;
6380 struct biosregs ireg, oreg;
6381 struct e820entry *desc = boot_params.e820_map;
6382 static struct e820entry buf; /* static so it is zeroed */
6383 diff -urNp linux-2.6.32.42/arch/x86/boot/video.c linux-2.6.32.42/arch/x86/boot/video.c
6384 --- linux-2.6.32.42/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6385 +++ linux-2.6.32.42/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6386 @@ -90,7 +90,7 @@ static void store_mode_params(void)
6387 static unsigned int get_entry(void)
6388 {
6389 char entry_buf[4];
6390 - int i, len = 0;
6391 + unsigned int i, len = 0;
6392 int key;
6393 unsigned int v;
6394
6395 diff -urNp linux-2.6.32.42/arch/x86/boot/video-vesa.c linux-2.6.32.42/arch/x86/boot/video-vesa.c
6396 --- linux-2.6.32.42/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6397 +++ linux-2.6.32.42/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6398 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6399
6400 boot_params.screen_info.vesapm_seg = oreg.es;
6401 boot_params.screen_info.vesapm_off = oreg.di;
6402 + boot_params.screen_info.vesapm_size = oreg.cx;
6403 }
6404
6405 /*
6406 diff -urNp linux-2.6.32.42/arch/x86/ia32/ia32_aout.c linux-2.6.32.42/arch/x86/ia32/ia32_aout.c
6407 --- linux-2.6.32.42/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6408 +++ linux-2.6.32.42/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6409 @@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6410 unsigned long dump_start, dump_size;
6411 struct user32 dump;
6412
6413 + memset(&dump, 0, sizeof(dump));
6414 +
6415 fs = get_fs();
6416 set_fs(KERNEL_DS);
6417 has_dumped = 1;
6418 @@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6419 dump_size = dump.u_ssize << PAGE_SHIFT;
6420 DUMP_WRITE(dump_start, dump_size);
6421 }
6422 - /*
6423 - * Finally dump the task struct. Not be used by gdb, but
6424 - * could be useful
6425 - */
6426 - set_fs(KERNEL_DS);
6427 - DUMP_WRITE(current, sizeof(*current));
6428 end_coredump:
6429 set_fs(fs);
6430 return has_dumped;
6431 diff -urNp linux-2.6.32.42/arch/x86/ia32/ia32entry.S linux-2.6.32.42/arch/x86/ia32/ia32entry.S
6432 --- linux-2.6.32.42/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6433 +++ linux-2.6.32.42/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6434 @@ -13,6 +13,7 @@
6435 #include <asm/thread_info.h>
6436 #include <asm/segment.h>
6437 #include <asm/irqflags.h>
6438 +#include <asm/pgtable.h>
6439 #include <linux/linkage.h>
6440
6441 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6442 @@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6443 ENDPROC(native_irq_enable_sysexit)
6444 #endif
6445
6446 + .macro pax_enter_kernel_user
6447 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6448 + call pax_enter_kernel_user
6449 +#endif
6450 + .endm
6451 +
6452 + .macro pax_exit_kernel_user
6453 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6454 + call pax_exit_kernel_user
6455 +#endif
6456 +#ifdef CONFIG_PAX_RANDKSTACK
6457 + pushq %rax
6458 + call pax_randomize_kstack
6459 + popq %rax
6460 +#endif
6461 + pax_erase_kstack
6462 + .endm
6463 +
6464 +.macro pax_erase_kstack
6465 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6466 + call pax_erase_kstack
6467 +#endif
6468 +.endm
6469 +
6470 /*
6471 * 32bit SYSENTER instruction entry.
6472 *
6473 @@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6474 CFI_REGISTER rsp,rbp
6475 SWAPGS_UNSAFE_STACK
6476 movq PER_CPU_VAR(kernel_stack), %rsp
6477 - addq $(KERNEL_STACK_OFFSET),%rsp
6478 + pax_enter_kernel_user
6479 /*
6480 * No need to follow this irqs on/off section: the syscall
6481 * disabled irqs, here we enable it straight after entry:
6482 @@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6483 pushfq
6484 CFI_ADJUST_CFA_OFFSET 8
6485 /*CFI_REL_OFFSET rflags,0*/
6486 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6487 + GET_THREAD_INFO(%r10)
6488 + movl TI_sysenter_return(%r10), %r10d
6489 CFI_REGISTER rip,r10
6490 pushq $__USER32_CS
6491 CFI_ADJUST_CFA_OFFSET 8
6492 @@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6493 SAVE_ARGS 0,0,1
6494 /* no need to do an access_ok check here because rbp has been
6495 32bit zero extended */
6496 +
6497 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6498 + mov $PAX_USER_SHADOW_BASE,%r10
6499 + add %r10,%rbp
6500 +#endif
6501 +
6502 1: movl (%rbp),%ebp
6503 .section __ex_table,"a"
6504 .quad 1b,ia32_badarg
6505 @@ -172,6 +204,7 @@ sysenter_dispatch:
6506 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6507 jnz sysexit_audit
6508 sysexit_from_sys_call:
6509 + pax_exit_kernel_user
6510 andl $~TS_COMPAT,TI_status(%r10)
6511 /* clear IF, that popfq doesn't enable interrupts early */
6512 andl $~0x200,EFLAGS-R11(%rsp)
6513 @@ -200,6 +233,9 @@ sysexit_from_sys_call:
6514 movl %eax,%esi /* 2nd arg: syscall number */
6515 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6516 call audit_syscall_entry
6517 +
6518 + pax_erase_kstack
6519 +
6520 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6521 cmpq $(IA32_NR_syscalls-1),%rax
6522 ja ia32_badsys
6523 @@ -252,6 +288,9 @@ sysenter_tracesys:
6524 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6525 movq %rsp,%rdi /* &pt_regs -> arg1 */
6526 call syscall_trace_enter
6527 +
6528 + pax_erase_kstack
6529 +
6530 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6531 RESTORE_REST
6532 cmpq $(IA32_NR_syscalls-1),%rax
6533 @@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6534 ENTRY(ia32_cstar_target)
6535 CFI_STARTPROC32 simple
6536 CFI_SIGNAL_FRAME
6537 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6538 + CFI_DEF_CFA rsp,0
6539 CFI_REGISTER rip,rcx
6540 /*CFI_REGISTER rflags,r11*/
6541 SWAPGS_UNSAFE_STACK
6542 movl %esp,%r8d
6543 CFI_REGISTER rsp,r8
6544 movq PER_CPU_VAR(kernel_stack),%rsp
6545 +
6546 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6547 + pax_enter_kernel_user
6548 +#endif
6549 +
6550 /*
6551 * No need to follow this irqs on/off section: the syscall
6552 * disabled irqs and here we enable it straight after entry:
6553 */
6554 ENABLE_INTERRUPTS(CLBR_NONE)
6555 - SAVE_ARGS 8,1,1
6556 + SAVE_ARGS 8*6,1,1
6557 movl %eax,%eax /* zero extension */
6558 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6559 movq %rcx,RIP-ARGOFFSET(%rsp)
6560 @@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6561 /* no need to do an access_ok check here because r8 has been
6562 32bit zero extended */
6563 /* hardware stack frame is complete now */
6564 +
6565 +#ifdef CONFIG_PAX_MEMORY_UDEREF
6566 + mov $PAX_USER_SHADOW_BASE,%r10
6567 + add %r10,%r8
6568 +#endif
6569 +
6570 1: movl (%r8),%r9d
6571 .section __ex_table,"a"
6572 .quad 1b,ia32_badarg
6573 @@ -333,6 +383,7 @@ cstar_dispatch:
6574 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6575 jnz sysretl_audit
6576 sysretl_from_sys_call:
6577 + pax_exit_kernel_user
6578 andl $~TS_COMPAT,TI_status(%r10)
6579 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6580 movl RIP-ARGOFFSET(%rsp),%ecx
6581 @@ -370,6 +421,9 @@ cstar_tracesys:
6582 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6583 movq %rsp,%rdi /* &pt_regs -> arg1 */
6584 call syscall_trace_enter
6585 +
6586 + pax_erase_kstack
6587 +
6588 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6589 RESTORE_REST
6590 xchgl %ebp,%r9d
6591 @@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6592 CFI_REL_OFFSET rip,RIP-RIP
6593 PARAVIRT_ADJUST_EXCEPTION_FRAME
6594 SWAPGS
6595 + pax_enter_kernel_user
6596 /*
6597 * No need to follow this irqs on/off section: the syscall
6598 * disabled irqs and here we enable it straight after entry:
6599 @@ -448,6 +503,9 @@ ia32_tracesys:
6600 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6601 movq %rsp,%rdi /* &pt_regs -> arg1 */
6602 call syscall_trace_enter
6603 +
6604 + pax_erase_kstack
6605 +
6606 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6607 RESTORE_REST
6608 cmpq $(IA32_NR_syscalls-1),%rax
6609 diff -urNp linux-2.6.32.42/arch/x86/ia32/ia32_signal.c linux-2.6.32.42/arch/x86/ia32/ia32_signal.c
6610 --- linux-2.6.32.42/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6611 +++ linux-2.6.32.42/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6612 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6613 sp -= frame_size;
6614 /* Align the stack pointer according to the i386 ABI,
6615 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6616 - sp = ((sp + 4) & -16ul) - 4;
6617 + sp = ((sp - 12) & -16ul) - 4;
6618 return (void __user *) sp;
6619 }
6620
6621 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6622 * These are actually not used anymore, but left because some
6623 * gdb versions depend on them as a marker.
6624 */
6625 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6626 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6627 } put_user_catch(err);
6628
6629 if (err)
6630 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6631 0xb8,
6632 __NR_ia32_rt_sigreturn,
6633 0x80cd,
6634 - 0,
6635 + 0
6636 };
6637
6638 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6639 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6640
6641 if (ka->sa.sa_flags & SA_RESTORER)
6642 restorer = ka->sa.sa_restorer;
6643 + else if (current->mm->context.vdso)
6644 + /* Return stub is in 32bit vsyscall page */
6645 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6646 else
6647 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6648 - rt_sigreturn);
6649 + restorer = &frame->retcode;
6650 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6651
6652 /*
6653 * Not actually used anymore, but left because some gdb
6654 * versions need it.
6655 */
6656 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6657 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6658 } put_user_catch(err);
6659
6660 if (err)
6661 diff -urNp linux-2.6.32.42/arch/x86/include/asm/alternative.h linux-2.6.32.42/arch/x86/include/asm/alternative.h
6662 --- linux-2.6.32.42/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6663 +++ linux-2.6.32.42/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6664 @@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6665 " .byte 662b-661b\n" /* sourcelen */ \
6666 " .byte 664f-663f\n" /* replacementlen */ \
6667 ".previous\n" \
6668 - ".section .altinstr_replacement, \"ax\"\n" \
6669 + ".section .altinstr_replacement, \"a\"\n" \
6670 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6671 ".previous"
6672
6673 diff -urNp linux-2.6.32.42/arch/x86/include/asm/apm.h linux-2.6.32.42/arch/x86/include/asm/apm.h
6674 --- linux-2.6.32.42/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
6675 +++ linux-2.6.32.42/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
6676 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6677 __asm__ __volatile__(APM_DO_ZERO_SEGS
6678 "pushl %%edi\n\t"
6679 "pushl %%ebp\n\t"
6680 - "lcall *%%cs:apm_bios_entry\n\t"
6681 + "lcall *%%ss:apm_bios_entry\n\t"
6682 "setc %%al\n\t"
6683 "popl %%ebp\n\t"
6684 "popl %%edi\n\t"
6685 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6686 __asm__ __volatile__(APM_DO_ZERO_SEGS
6687 "pushl %%edi\n\t"
6688 "pushl %%ebp\n\t"
6689 - "lcall *%%cs:apm_bios_entry\n\t"
6690 + "lcall *%%ss:apm_bios_entry\n\t"
6691 "setc %%bl\n\t"
6692 "popl %%ebp\n\t"
6693 "popl %%edi\n\t"
6694 diff -urNp linux-2.6.32.42/arch/x86/include/asm/atomic_32.h linux-2.6.32.42/arch/x86/include/asm/atomic_32.h
6695 --- linux-2.6.32.42/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
6696 +++ linux-2.6.32.42/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
6697 @@ -25,6 +25,17 @@ static inline int atomic_read(const atom
6698 }
6699
6700 /**
6701 + * atomic_read_unchecked - read atomic variable
6702 + * @v: pointer of type atomic_unchecked_t
6703 + *
6704 + * Atomically reads the value of @v.
6705 + */
6706 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6707 +{
6708 + return v->counter;
6709 +}
6710 +
6711 +/**
6712 * atomic_set - set atomic variable
6713 * @v: pointer of type atomic_t
6714 * @i: required value
6715 @@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
6716 }
6717
6718 /**
6719 + * atomic_set_unchecked - set atomic variable
6720 + * @v: pointer of type atomic_unchecked_t
6721 + * @i: required value
6722 + *
6723 + * Atomically sets the value of @v to @i.
6724 + */
6725 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6726 +{
6727 + v->counter = i;
6728 +}
6729 +
6730 +/**
6731 * atomic_add - add integer to atomic variable
6732 * @i: integer value to add
6733 * @v: pointer of type atomic_t
6734 @@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
6735 */
6736 static inline void atomic_add(int i, atomic_t *v)
6737 {
6738 - asm volatile(LOCK_PREFIX "addl %1,%0"
6739 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6740 +
6741 +#ifdef CONFIG_PAX_REFCOUNT
6742 + "jno 0f\n"
6743 + LOCK_PREFIX "subl %1,%0\n"
6744 + "int $4\n0:\n"
6745 + _ASM_EXTABLE(0b, 0b)
6746 +#endif
6747 +
6748 + : "+m" (v->counter)
6749 + : "ir" (i));
6750 +}
6751 +
6752 +/**
6753 + * atomic_add_unchecked - add integer to atomic variable
6754 + * @i: integer value to add
6755 + * @v: pointer of type atomic_unchecked_t
6756 + *
6757 + * Atomically adds @i to @v.
6758 + */
6759 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6760 +{
6761 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6762 : "+m" (v->counter)
6763 : "ir" (i));
6764 }
6765 @@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
6766 */
6767 static inline void atomic_sub(int i, atomic_t *v)
6768 {
6769 - asm volatile(LOCK_PREFIX "subl %1,%0"
6770 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6771 +
6772 +#ifdef CONFIG_PAX_REFCOUNT
6773 + "jno 0f\n"
6774 + LOCK_PREFIX "addl %1,%0\n"
6775 + "int $4\n0:\n"
6776 + _ASM_EXTABLE(0b, 0b)
6777 +#endif
6778 +
6779 + : "+m" (v->counter)
6780 + : "ir" (i));
6781 +}
6782 +
6783 +/**
6784 + * atomic_sub_unchecked - subtract integer from atomic variable
6785 + * @i: integer value to subtract
6786 + * @v: pointer of type atomic_unchecked_t
6787 + *
6788 + * Atomically subtracts @i from @v.
6789 + */
6790 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6791 +{
6792 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6793 : "+m" (v->counter)
6794 : "ir" (i));
6795 }
6796 @@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
6797 {
6798 unsigned char c;
6799
6800 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6801 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6802 +
6803 +#ifdef CONFIG_PAX_REFCOUNT
6804 + "jno 0f\n"
6805 + LOCK_PREFIX "addl %2,%0\n"
6806 + "int $4\n0:\n"
6807 + _ASM_EXTABLE(0b, 0b)
6808 +#endif
6809 +
6810 + "sete %1\n"
6811 : "+m" (v->counter), "=qm" (c)
6812 : "ir" (i) : "memory");
6813 return c;
6814 @@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
6815 */
6816 static inline void atomic_inc(atomic_t *v)
6817 {
6818 - asm volatile(LOCK_PREFIX "incl %0"
6819 + asm volatile(LOCK_PREFIX "incl %0\n"
6820 +
6821 +#ifdef CONFIG_PAX_REFCOUNT
6822 + "jno 0f\n"
6823 + LOCK_PREFIX "decl %0\n"
6824 + "int $4\n0:\n"
6825 + _ASM_EXTABLE(0b, 0b)
6826 +#endif
6827 +
6828 + : "+m" (v->counter));
6829 +}
6830 +
6831 +/**
6832 + * atomic_inc_unchecked - increment atomic variable
6833 + * @v: pointer of type atomic_unchecked_t
6834 + *
6835 + * Atomically increments @v by 1.
6836 + */
6837 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6838 +{
6839 + asm volatile(LOCK_PREFIX "incl %0\n"
6840 : "+m" (v->counter));
6841 }
6842
6843 @@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
6844 */
6845 static inline void atomic_dec(atomic_t *v)
6846 {
6847 - asm volatile(LOCK_PREFIX "decl %0"
6848 + asm volatile(LOCK_PREFIX "decl %0\n"
6849 +
6850 +#ifdef CONFIG_PAX_REFCOUNT
6851 + "jno 0f\n"
6852 + LOCK_PREFIX "incl %0\n"
6853 + "int $4\n0:\n"
6854 + _ASM_EXTABLE(0b, 0b)
6855 +#endif
6856 +
6857 + : "+m" (v->counter));
6858 +}
6859 +
6860 +/**
6861 + * atomic_dec_unchecked - decrement atomic variable
6862 + * @v: pointer of type atomic_unchecked_t
6863 + *
6864 + * Atomically decrements @v by 1.
6865 + */
6866 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6867 +{
6868 + asm volatile(LOCK_PREFIX "decl %0\n"
6869 : "+m" (v->counter));
6870 }
6871
6872 @@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
6873 {
6874 unsigned char c;
6875
6876 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6877 + asm volatile(LOCK_PREFIX "decl %0\n"
6878 +
6879 +#ifdef CONFIG_PAX_REFCOUNT
6880 + "jno 0f\n"
6881 + LOCK_PREFIX "incl %0\n"
6882 + "int $4\n0:\n"
6883 + _ASM_EXTABLE(0b, 0b)
6884 +#endif
6885 +
6886 + "sete %1\n"
6887 : "+m" (v->counter), "=qm" (c)
6888 : : "memory");
6889 return c != 0;
6890 @@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
6891 {
6892 unsigned char c;
6893
6894 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6895 + asm volatile(LOCK_PREFIX "incl %0\n"
6896 +
6897 +#ifdef CONFIG_PAX_REFCOUNT
6898 + "jno 0f\n"
6899 + LOCK_PREFIX "decl %0\n"
6900 + "into\n0:\n"
6901 + _ASM_EXTABLE(0b, 0b)
6902 +#endif
6903 +
6904 + "sete %1\n"
6905 + : "+m" (v->counter), "=qm" (c)
6906 + : : "memory");
6907 + return c != 0;
6908 +}
6909 +
6910 +/**
6911 + * atomic_inc_and_test_unchecked - increment and test
6912 + * @v: pointer of type atomic_unchecked_t
6913 + *
6914 + * Atomically increments @v by 1
6915 + * and returns true if the result is zero, or false for all
6916 + * other cases.
6917 + */
6918 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6919 +{
6920 + unsigned char c;
6921 +
6922 + asm volatile(LOCK_PREFIX "incl %0\n"
6923 + "sete %1\n"
6924 : "+m" (v->counter), "=qm" (c)
6925 : : "memory");
6926 return c != 0;
6927 @@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
6928 {
6929 unsigned char c;
6930
6931 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6932 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6933 +
6934 +#ifdef CONFIG_PAX_REFCOUNT
6935 + "jno 0f\n"
6936 + LOCK_PREFIX "subl %2,%0\n"
6937 + "int $4\n0:\n"
6938 + _ASM_EXTABLE(0b, 0b)
6939 +#endif
6940 +
6941 + "sets %1\n"
6942 : "+m" (v->counter), "=qm" (c)
6943 : "ir" (i) : "memory");
6944 return c;
6945 @@ -179,6 +341,46 @@ static inline int atomic_add_return(int
6946 #endif
6947 /* Modern 486+ processor */
6948 __i = i;
6949 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6950 +
6951 +#ifdef CONFIG_PAX_REFCOUNT
6952 + "jno 0f\n"
6953 + "movl %0, %1\n"
6954 + "int $4\n0:\n"
6955 + _ASM_EXTABLE(0b, 0b)
6956 +#endif
6957 +
6958 + : "+r" (i), "+m" (v->counter)
6959 + : : "memory");
6960 + return i + __i;
6961 +
6962 +#ifdef CONFIG_M386
6963 +no_xadd: /* Legacy 386 processor */
6964 + local_irq_save(flags);
6965 + __i = atomic_read(v);
6966 + atomic_set(v, i + __i);
6967 + local_irq_restore(flags);
6968 + return i + __i;
6969 +#endif
6970 +}
6971 +
6972 +/**
6973 + * atomic_add_return_unchecked - add integer and return
6974 + * @v: pointer of type atomic_unchecked_t
6975 + * @i: integer value to add
6976 + *
6977 + * Atomically adds @i to @v and returns @i + @v
6978 + */
6979 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6980 +{
6981 + int __i;
6982 +#ifdef CONFIG_M386
6983 + unsigned long flags;
6984 + if (unlikely(boot_cpu_data.x86 <= 3))
6985 + goto no_xadd;
6986 +#endif
6987 + /* Modern 486+ processor */
6988 + __i = i;
6989 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6990 : "+r" (i), "+m" (v->counter)
6991 : : "memory");
6992 @@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
6993 return cmpxchg(&v->counter, old, new);
6994 }
6995
6996 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6997 +{
6998 + return cmpxchg(&v->counter, old, new);
6999 +}
7000 +
7001 static inline int atomic_xchg(atomic_t *v, int new)
7002 {
7003 return xchg(&v->counter, new);
7004 }
7005
7006 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7007 +{
7008 + return xchg(&v->counter, new);
7009 +}
7010 +
7011 /**
7012 * atomic_add_unless - add unless the number is already a given value
7013 * @v: pointer of type atomic_t
7014 @@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7015 */
7016 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7017 {
7018 - int c, old;
7019 + int c, old, new;
7020 c = atomic_read(v);
7021 for (;;) {
7022 - if (unlikely(c == (u)))
7023 + if (unlikely(c == u))
7024 break;
7025 - old = atomic_cmpxchg((v), c, c + (a));
7026 +
7027 + asm volatile("addl %2,%0\n"
7028 +
7029 +#ifdef CONFIG_PAX_REFCOUNT
7030 + "jno 0f\n"
7031 + "subl %2,%0\n"
7032 + "int $4\n0:\n"
7033 + _ASM_EXTABLE(0b, 0b)
7034 +#endif
7035 +
7036 + : "=r" (new)
7037 + : "0" (c), "ir" (a));
7038 +
7039 + old = atomic_cmpxchg(v, c, new);
7040 if (likely(old == c))
7041 break;
7042 c = old;
7043 }
7044 - return c != (u);
7045 + return c != u;
7046 }
7047
7048 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7049
7050 #define atomic_inc_return(v) (atomic_add_return(1, v))
7051 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7052 +{
7053 + return atomic_add_return_unchecked(1, v);
7054 +}
7055 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7056
7057 /* These are x86-specific, used by some header files */
7058 @@ -266,9 +495,18 @@ typedef struct {
7059 u64 __aligned(8) counter;
7060 } atomic64_t;
7061
7062 +#ifdef CONFIG_PAX_REFCOUNT
7063 +typedef struct {
7064 + u64 __aligned(8) counter;
7065 +} atomic64_unchecked_t;
7066 +#else
7067 +typedef atomic64_t atomic64_unchecked_t;
7068 +#endif
7069 +
7070 #define ATOMIC64_INIT(val) { (val) }
7071
7072 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7073 +extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7074
7075 /**
7076 * atomic64_xchg - xchg atomic64 variable
7077 @@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7078 * the old value.
7079 */
7080 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7081 +extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7082
7083 /**
7084 * atomic64_set - set atomic64 variable
7085 @@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7086 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7087
7088 /**
7089 + * atomic64_unchecked_set - set atomic64 variable
7090 + * @ptr: pointer to type atomic64_unchecked_t
7091 + * @new_val: value to assign
7092 + *
7093 + * Atomically sets the value of @ptr to @new_val.
7094 + */
7095 +extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7096 +
7097 +/**
7098 * atomic64_read - read atomic64 variable
7099 * @ptr: pointer to type atomic64_t
7100 *
7101 @@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7102 return res;
7103 }
7104
7105 -extern u64 atomic64_read(atomic64_t *ptr);
7106 +/**
7107 + * atomic64_read_unchecked - read atomic64 variable
7108 + * @ptr: pointer to type atomic64_unchecked_t
7109 + *
7110 + * Atomically reads the value of @ptr and returns it.
7111 + */
7112 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7113 +{
7114 + u64 res;
7115 +
7116 + /*
7117 + * Note, we inline this atomic64_unchecked_t primitive because
7118 + * it only clobbers EAX/EDX and leaves the others
7119 + * untouched. We also (somewhat subtly) rely on the
7120 + * fact that cmpxchg8b returns the current 64-bit value
7121 + * of the memory location we are touching:
7122 + */
7123 + asm volatile(
7124 + "mov %%ebx, %%eax\n\t"
7125 + "mov %%ecx, %%edx\n\t"
7126 + LOCK_PREFIX "cmpxchg8b %1\n"
7127 + : "=&A" (res)
7128 + : "m" (*ptr)
7129 + );
7130 +
7131 + return res;
7132 +}
7133
7134 /**
7135 * atomic64_add_return - add and return
7136 @@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7137 * Other variants with different arithmetic operators:
7138 */
7139 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7140 +extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7141 extern u64 atomic64_inc_return(atomic64_t *ptr);
7142 +extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7143 extern u64 atomic64_dec_return(atomic64_t *ptr);
7144 +extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7145
7146 /**
7147 * atomic64_add - add integer to atomic64 variable
7148 @@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7149 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7150
7151 /**
7152 + * atomic64_add_unchecked - add integer to atomic64 variable
7153 + * @delta: integer value to add
7154 + * @ptr: pointer to type atomic64_unchecked_t
7155 + *
7156 + * Atomically adds @delta to @ptr.
7157 + */
7158 +extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7159 +
7160 +/**
7161 * atomic64_sub - subtract the atomic64 variable
7162 * @delta: integer value to subtract
7163 * @ptr: pointer to type atomic64_t
7164 @@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7165 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7166
7167 /**
7168 + * atomic64_sub_unchecked - subtract the atomic64 variable
7169 + * @delta: integer value to subtract
7170 + * @ptr: pointer to type atomic64_unchecked_t
7171 + *
7172 + * Atomically subtracts @delta from @ptr.
7173 + */
7174 +extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7175 +
7176 +/**
7177 * atomic64_sub_and_test - subtract value from variable and test result
7178 * @delta: integer value to subtract
7179 * @ptr: pointer to type atomic64_t
7180 @@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7181 extern void atomic64_inc(atomic64_t *ptr);
7182
7183 /**
7184 + * atomic64_inc_unchecked - increment atomic64 variable
7185 + * @ptr: pointer to type atomic64_unchecked_t
7186 + *
7187 + * Atomically increments @ptr by 1.
7188 + */
7189 +extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7190 +
7191 +/**
7192 * atomic64_dec - decrement atomic64 variable
7193 * @ptr: pointer to type atomic64_t
7194 *
7195 @@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7196 extern void atomic64_dec(atomic64_t *ptr);
7197
7198 /**
7199 + * atomic64_dec_unchecked - decrement atomic64 variable
7200 + * @ptr: pointer to type atomic64_unchecked_t
7201 + *
7202 + * Atomically decrements @ptr by 1.
7203 + */
7204 +extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7205 +
7206 +/**
7207 * atomic64_dec_and_test - decrement and test
7208 * @ptr: pointer to type atomic64_t
7209 *
7210 diff -urNp linux-2.6.32.42/arch/x86/include/asm/atomic_64.h linux-2.6.32.42/arch/x86/include/asm/atomic_64.h
7211 --- linux-2.6.32.42/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7212 +++ linux-2.6.32.42/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7213 @@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7214 }
7215
7216 /**
7217 + * atomic_read_unchecked - read atomic variable
7218 + * @v: pointer of type atomic_unchecked_t
7219 + *
7220 + * Atomically reads the value of @v.
7221 + */
7222 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7223 +{
7224 + return v->counter;
7225 +}
7226 +
7227 +/**
7228 * atomic_set - set atomic variable
7229 * @v: pointer of type atomic_t
7230 * @i: required value
7231 @@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7232 }
7233
7234 /**
7235 + * atomic_set_unchecked - set atomic variable
7236 + * @v: pointer of type atomic_unchecked_t
7237 + * @i: required value
7238 + *
7239 + * Atomically sets the value of @v to @i.
7240 + */
7241 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7242 +{
7243 + v->counter = i;
7244 +}
7245 +
7246 +/**
7247 * atomic_add - add integer to atomic variable
7248 * @i: integer value to add
7249 * @v: pointer of type atomic_t
7250 @@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7251 */
7252 static inline void atomic_add(int i, atomic_t *v)
7253 {
7254 - asm volatile(LOCK_PREFIX "addl %1,%0"
7255 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7256 +
7257 +#ifdef CONFIG_PAX_REFCOUNT
7258 + "jno 0f\n"
7259 + LOCK_PREFIX "subl %1,%0\n"
7260 + "int $4\n0:\n"
7261 + _ASM_EXTABLE(0b, 0b)
7262 +#endif
7263 +
7264 + : "=m" (v->counter)
7265 + : "ir" (i), "m" (v->counter));
7266 +}
7267 +
7268 +/**
7269 + * atomic_add_unchecked - add integer to atomic variable
7270 + * @i: integer value to add
7271 + * @v: pointer of type atomic_unchecked_t
7272 + *
7273 + * Atomically adds @i to @v.
7274 + */
7275 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7276 +{
7277 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
7278 : "=m" (v->counter)
7279 : "ir" (i), "m" (v->counter));
7280 }
7281 @@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7282 */
7283 static inline void atomic_sub(int i, atomic_t *v)
7284 {
7285 - asm volatile(LOCK_PREFIX "subl %1,%0"
7286 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7287 +
7288 +#ifdef CONFIG_PAX_REFCOUNT
7289 + "jno 0f\n"
7290 + LOCK_PREFIX "addl %1,%0\n"
7291 + "int $4\n0:\n"
7292 + _ASM_EXTABLE(0b, 0b)
7293 +#endif
7294 +
7295 + : "=m" (v->counter)
7296 + : "ir" (i), "m" (v->counter));
7297 +}
7298 +
7299 +/**
7300 + * atomic_sub_unchecked - subtract the atomic variable
7301 + * @i: integer value to subtract
7302 + * @v: pointer of type atomic_unchecked_t
7303 + *
7304 + * Atomically subtracts @i from @v.
7305 + */
7306 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7307 +{
7308 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
7309 : "=m" (v->counter)
7310 : "ir" (i), "m" (v->counter));
7311 }
7312 @@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7313 {
7314 unsigned char c;
7315
7316 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7317 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
7318 +
7319 +#ifdef CONFIG_PAX_REFCOUNT
7320 + "jno 0f\n"
7321 + LOCK_PREFIX "addl %2,%0\n"
7322 + "int $4\n0:\n"
7323 + _ASM_EXTABLE(0b, 0b)
7324 +#endif
7325 +
7326 + "sete %1\n"
7327 : "=m" (v->counter), "=qm" (c)
7328 : "ir" (i), "m" (v->counter) : "memory");
7329 return c;
7330 @@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7331 */
7332 static inline void atomic_inc(atomic_t *v)
7333 {
7334 - asm volatile(LOCK_PREFIX "incl %0"
7335 + asm volatile(LOCK_PREFIX "incl %0\n"
7336 +
7337 +#ifdef CONFIG_PAX_REFCOUNT
7338 + "jno 0f\n"
7339 + LOCK_PREFIX "decl %0\n"
7340 + "int $4\n0:\n"
7341 + _ASM_EXTABLE(0b, 0b)
7342 +#endif
7343 +
7344 + : "=m" (v->counter)
7345 + : "m" (v->counter));
7346 +}
7347 +
7348 +/**
7349 + * atomic_inc_unchecked - increment atomic variable
7350 + * @v: pointer of type atomic_unchecked_t
7351 + *
7352 + * Atomically increments @v by 1.
7353 + */
7354 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7355 +{
7356 + asm volatile(LOCK_PREFIX "incl %0\n"
7357 : "=m" (v->counter)
7358 : "m" (v->counter));
7359 }
7360 @@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7361 */
7362 static inline void atomic_dec(atomic_t *v)
7363 {
7364 - asm volatile(LOCK_PREFIX "decl %0"
7365 + asm volatile(LOCK_PREFIX "decl %0\n"
7366 +
7367 +#ifdef CONFIG_PAX_REFCOUNT
7368 + "jno 0f\n"
7369 + LOCK_PREFIX "incl %0\n"
7370 + "int $4\n0:\n"
7371 + _ASM_EXTABLE(0b, 0b)
7372 +#endif
7373 +
7374 + : "=m" (v->counter)
7375 + : "m" (v->counter));
7376 +}
7377 +
7378 +/**
7379 + * atomic_dec_unchecked - decrement atomic variable
7380 + * @v: pointer of type atomic_unchecked_t
7381 + *
7382 + * Atomically decrements @v by 1.
7383 + */
7384 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7385 +{
7386 + asm volatile(LOCK_PREFIX "decl %0\n"
7387 : "=m" (v->counter)
7388 : "m" (v->counter));
7389 }
7390 @@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7391 {
7392 unsigned char c;
7393
7394 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
7395 + asm volatile(LOCK_PREFIX "decl %0\n"
7396 +
7397 +#ifdef CONFIG_PAX_REFCOUNT
7398 + "jno 0f\n"
7399 + LOCK_PREFIX "incl %0\n"
7400 + "int $4\n0:\n"
7401 + _ASM_EXTABLE(0b, 0b)
7402 +#endif
7403 +
7404 + "sete %1\n"
7405 : "=m" (v->counter), "=qm" (c)
7406 : "m" (v->counter) : "memory");
7407 return c != 0;
7408 @@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7409 {
7410 unsigned char c;
7411
7412 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
7413 + asm volatile(LOCK_PREFIX "incl %0\n"
7414 +
7415 +#ifdef CONFIG_PAX_REFCOUNT
7416 + "jno 0f\n"
7417 + LOCK_PREFIX "decl %0\n"
7418 + "int $4\n0:\n"
7419 + _ASM_EXTABLE(0b, 0b)
7420 +#endif
7421 +
7422 + "sete %1\n"
7423 + : "=m" (v->counter), "=qm" (c)
7424 + : "m" (v->counter) : "memory");
7425 + return c != 0;
7426 +}
7427 +
7428 +/**
7429 + * atomic_inc_and_test_unchecked - increment and test
7430 + * @v: pointer of type atomic_unchecked_t
7431 + *
7432 + * Atomically increments @v by 1
7433 + * and returns true if the result is zero, or false for all
7434 + * other cases.
7435 + */
7436 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7437 +{
7438 + unsigned char c;
7439 +
7440 + asm volatile(LOCK_PREFIX "incl %0\n"
7441 + "sete %1\n"
7442 : "=m" (v->counter), "=qm" (c)
7443 : "m" (v->counter) : "memory");
7444 return c != 0;
7445 @@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7446 {
7447 unsigned char c;
7448
7449 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7450 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
7451 +
7452 +#ifdef CONFIG_PAX_REFCOUNT
7453 + "jno 0f\n"
7454 + LOCK_PREFIX "subl %2,%0\n"
7455 + "int $4\n0:\n"
7456 + _ASM_EXTABLE(0b, 0b)
7457 +#endif
7458 +
7459 + "sets %1\n"
7460 : "=m" (v->counter), "=qm" (c)
7461 : "ir" (i), "m" (v->counter) : "memory");
7462 return c;
7463 @@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7464 static inline int atomic_add_return(int i, atomic_t *v)
7465 {
7466 int __i = i;
7467 - asm volatile(LOCK_PREFIX "xaddl %0, %1"
7468 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7469 +
7470 +#ifdef CONFIG_PAX_REFCOUNT
7471 + "jno 0f\n"
7472 + "movl %0, %1\n"
7473 + "int $4\n0:\n"
7474 + _ASM_EXTABLE(0b, 0b)
7475 +#endif
7476 +
7477 + : "+r" (i), "+m" (v->counter)
7478 + : : "memory");
7479 + return i + __i;
7480 +}
7481 +
7482 +/**
7483 + * atomic_add_return_unchecked - add and return
7484 + * @i: integer value to add
7485 + * @v: pointer of type atomic_unchecked_t
7486 + *
7487 + * Atomically adds @i to @v and returns @i + @v
7488 + */
7489 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7490 +{
7491 + int __i = i;
7492 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7493 : "+r" (i), "+m" (v->counter)
7494 : : "memory");
7495 return i + __i;
7496 @@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7497 }
7498
7499 #define atomic_inc_return(v) (atomic_add_return(1, v))
7500 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7501 +{
7502 + return atomic_add_return_unchecked(1, v);
7503 +}
7504 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7505
7506 /* The 64-bit atomic type */
7507 @@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7508 }
7509
7510 /**
7511 + * atomic64_read_unchecked - read atomic64 variable
7512 + * @v: pointer of type atomic64_unchecked_t
7513 + *
7514 + * Atomically reads the value of @v.
7515 + * Doesn't imply a read memory barrier.
7516 + */
7517 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7518 +{
7519 + return v->counter;
7520 +}
7521 +
7522 +/**
7523 * atomic64_set - set atomic64 variable
7524 * @v: pointer to type atomic64_t
7525 * @i: required value
7526 @@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7527 }
7528
7529 /**
7530 + * atomic64_set_unchecked - set atomic64 variable
7531 + * @v: pointer to type atomic64_unchecked_t
7532 + * @i: required value
7533 + *
7534 + * Atomically sets the value of @v to @i.
7535 + */
7536 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7537 +{
7538 + v->counter = i;
7539 +}
7540 +
7541 +/**
7542 * atomic64_add - add integer to atomic64 variable
7543 * @i: integer value to add
7544 * @v: pointer to type atomic64_t
7545 @@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7546 */
7547 static inline void atomic64_add(long i, atomic64_t *v)
7548 {
7549 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
7550 +
7551 +#ifdef CONFIG_PAX_REFCOUNT
7552 + "jno 0f\n"
7553 + LOCK_PREFIX "subq %1,%0\n"
7554 + "int $4\n0:\n"
7555 + _ASM_EXTABLE(0b, 0b)
7556 +#endif
7557 +
7558 + : "=m" (v->counter)
7559 + : "er" (i), "m" (v->counter));
7560 +}
7561 +
7562 +/**
7563 + * atomic64_add_unchecked - add integer to atomic64 variable
7564 + * @i: integer value to add
7565 + * @v: pointer to type atomic64_unchecked_t
7566 + *
7567 + * Atomically adds @i to @v.
7568 + */
7569 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7570 +{
7571 asm volatile(LOCK_PREFIX "addq %1,%0"
7572 : "=m" (v->counter)
7573 : "er" (i), "m" (v->counter));
7574 @@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7575 */
7576 static inline void atomic64_sub(long i, atomic64_t *v)
7577 {
7578 - asm volatile(LOCK_PREFIX "subq %1,%0"
7579 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
7580 +
7581 +#ifdef CONFIG_PAX_REFCOUNT
7582 + "jno 0f\n"
7583 + LOCK_PREFIX "addq %1,%0\n"
7584 + "int $4\n0:\n"
7585 + _ASM_EXTABLE(0b, 0b)
7586 +#endif
7587 +
7588 : "=m" (v->counter)
7589 : "er" (i), "m" (v->counter));
7590 }
7591 @@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7592 {
7593 unsigned char c;
7594
7595 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7596 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
7597 +
7598 +#ifdef CONFIG_PAX_REFCOUNT
7599 + "jno 0f\n"
7600 + LOCK_PREFIX "addq %2,%0\n"
7601 + "int $4\n0:\n"
7602 + _ASM_EXTABLE(0b, 0b)
7603 +#endif
7604 +
7605 + "sete %1\n"
7606 : "=m" (v->counter), "=qm" (c)
7607 : "er" (i), "m" (v->counter) : "memory");
7608 return c;
7609 @@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7610 */
7611 static inline void atomic64_inc(atomic64_t *v)
7612 {
7613 + asm volatile(LOCK_PREFIX "incq %0\n"
7614 +
7615 +#ifdef CONFIG_PAX_REFCOUNT
7616 + "jno 0f\n"
7617 + LOCK_PREFIX "decq %0\n"
7618 + "int $4\n0:\n"
7619 + _ASM_EXTABLE(0b, 0b)
7620 +#endif
7621 +
7622 + : "=m" (v->counter)
7623 + : "m" (v->counter));
7624 +}
7625 +
7626 +/**
7627 + * atomic64_inc_unchecked - increment atomic64 variable
7628 + * @v: pointer to type atomic64_unchecked_t
7629 + *
7630 + * Atomically increments @v by 1.
7631 + */
7632 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7633 +{
7634 asm volatile(LOCK_PREFIX "incq %0"
7635 : "=m" (v->counter)
7636 : "m" (v->counter));
7637 @@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7638 */
7639 static inline void atomic64_dec(atomic64_t *v)
7640 {
7641 - asm volatile(LOCK_PREFIX "decq %0"
7642 + asm volatile(LOCK_PREFIX "decq %0\n"
7643 +
7644 +#ifdef CONFIG_PAX_REFCOUNT
7645 + "jno 0f\n"
7646 + LOCK_PREFIX "incq %0\n"
7647 + "int $4\n0:\n"
7648 + _ASM_EXTABLE(0b, 0b)
7649 +#endif
7650 +
7651 + : "=m" (v->counter)
7652 + : "m" (v->counter));
7653 +}
7654 +
7655 +/**
7656 + * atomic64_dec_unchecked - decrement atomic64 variable
7657 + * @v: pointer to type atomic64_t
7658 + *
7659 + * Atomically decrements @v by 1.
7660 + */
7661 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7662 +{
7663 + asm volatile(LOCK_PREFIX "decq %0\n"
7664 : "=m" (v->counter)
7665 : "m" (v->counter));
7666 }
7667 @@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
7668 {
7669 unsigned char c;
7670
7671 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
7672 + asm volatile(LOCK_PREFIX "decq %0\n"
7673 +
7674 +#ifdef CONFIG_PAX_REFCOUNT
7675 + "jno 0f\n"
7676 + LOCK_PREFIX "incq %0\n"
7677 + "int $4\n0:\n"
7678 + _ASM_EXTABLE(0b, 0b)
7679 +#endif
7680 +
7681 + "sete %1\n"
7682 : "=m" (v->counter), "=qm" (c)
7683 : "m" (v->counter) : "memory");
7684 return c != 0;
7685 @@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
7686 {
7687 unsigned char c;
7688
7689 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
7690 + asm volatile(LOCK_PREFIX "incq %0\n"
7691 +
7692 +#ifdef CONFIG_PAX_REFCOUNT
7693 + "jno 0f\n"
7694 + LOCK_PREFIX "decq %0\n"
7695 + "int $4\n0:\n"
7696 + _ASM_EXTABLE(0b, 0b)
7697 +#endif
7698 +
7699 + "sete %1\n"
7700 : "=m" (v->counter), "=qm" (c)
7701 : "m" (v->counter) : "memory");
7702 return c != 0;
7703 @@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
7704 {
7705 unsigned char c;
7706
7707 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7708 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
7709 +
7710 +#ifdef CONFIG_PAX_REFCOUNT
7711 + "jno 0f\n"
7712 + LOCK_PREFIX "subq %2,%0\n"
7713 + "int $4\n0:\n"
7714 + _ASM_EXTABLE(0b, 0b)
7715 +#endif
7716 +
7717 + "sets %1\n"
7718 : "=m" (v->counter), "=qm" (c)
7719 : "er" (i), "m" (v->counter) : "memory");
7720 return c;
7721 @@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
7722 static inline long atomic64_add_return(long i, atomic64_t *v)
7723 {
7724 long __i = i;
7725 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7726 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7727 +
7728 +#ifdef CONFIG_PAX_REFCOUNT
7729 + "jno 0f\n"
7730 + "movq %0, %1\n"
7731 + "int $4\n0:\n"
7732 + _ASM_EXTABLE(0b, 0b)
7733 +#endif
7734 +
7735 + : "+r" (i), "+m" (v->counter)
7736 + : : "memory");
7737 + return i + __i;
7738 +}
7739 +
7740 +/**
7741 + * atomic64_add_return_unchecked - add and return
7742 + * @i: integer value to add
7743 + * @v: pointer to type atomic64_unchecked_t
7744 + *
7745 + * Atomically adds @i to @v and returns @i + @v
7746 + */
7747 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7748 +{
7749 + long __i = i;
7750 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
7751 : "+r" (i), "+m" (v->counter)
7752 : : "memory");
7753 return i + __i;
7754 @@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
7755 }
7756
7757 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7758 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7759 +{
7760 + return atomic64_add_return_unchecked(1, v);
7761 +}
7762 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7763
7764 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7765 @@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
7766 return cmpxchg(&v->counter, old, new);
7767 }
7768
7769 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7770 +{
7771 + return cmpxchg(&v->counter, old, new);
7772 +}
7773 +
7774 static inline long atomic64_xchg(atomic64_t *v, long new)
7775 {
7776 return xchg(&v->counter, new);
7777 }
7778
7779 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7780 +{
7781 + return xchg(&v->counter, new);
7782 +}
7783 +
7784 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
7785 {
7786 return cmpxchg(&v->counter, old, new);
7787 }
7788
7789 +static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7790 +{
7791 + return cmpxchg(&v->counter, old, new);
7792 +}
7793 +
7794 static inline long atomic_xchg(atomic_t *v, int new)
7795 {
7796 return xchg(&v->counter, new);
7797 }
7798
7799 +static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7800 +{
7801 + return xchg(&v->counter, new);
7802 +}
7803 +
7804 /**
7805 * atomic_add_unless - add unless the number is a given value
7806 * @v: pointer of type atomic_t
7807 @@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
7808 */
7809 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7810 {
7811 - int c, old;
7812 + int c, old, new;
7813 c = atomic_read(v);
7814 for (;;) {
7815 - if (unlikely(c == (u)))
7816 + if (unlikely(c == u))
7817 break;
7818 - old = atomic_cmpxchg((v), c, c + (a));
7819 +
7820 + asm volatile("addl %2,%0\n"
7821 +
7822 +#ifdef CONFIG_PAX_REFCOUNT
7823 + "jno 0f\n"
7824 + "subl %2,%0\n"
7825 + "int $4\n0:\n"
7826 + _ASM_EXTABLE(0b, 0b)
7827 +#endif
7828 +
7829 + : "=r" (new)
7830 + : "0" (c), "ir" (a));
7831 +
7832 + old = atomic_cmpxchg(v, c, new);
7833 if (likely(old == c))
7834 break;
7835 c = old;
7836 }
7837 - return c != (u);
7838 + return c != u;
7839 }
7840
7841 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7842 @@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
7843 */
7844 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
7845 {
7846 - long c, old;
7847 + long c, old, new;
7848 c = atomic64_read(v);
7849 for (;;) {
7850 - if (unlikely(c == (u)))
7851 + if (unlikely(c == u))
7852 break;
7853 - old = atomic64_cmpxchg((v), c, c + (a));
7854 +
7855 + asm volatile("addq %2,%0\n"
7856 +
7857 +#ifdef CONFIG_PAX_REFCOUNT
7858 + "jno 0f\n"
7859 + "subq %2,%0\n"
7860 + "int $4\n0:\n"
7861 + _ASM_EXTABLE(0b, 0b)
7862 +#endif
7863 +
7864 + : "=r" (new)
7865 + : "0" (c), "er" (a));
7866 +
7867 + old = atomic64_cmpxchg(v, c, new);
7868 if (likely(old == c))
7869 break;
7870 c = old;
7871 }
7872 - return c != (u);
7873 + return c != u;
7874 }
7875
7876 /**
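
All of the atomic64 hunks above apply the same PAX_REFCOUNT pattern: perform the locked operation, skip the fixup with jno while the overflow flag is clear, otherwise undo the operation and raise int $4 so the kernel's overflow handler can log the wrap. A minimal stand-alone sketch of the same idea in plain C, not taken from the patch (the helper name and __builtin_add_overflow are illustrative; the patch itself tests the CPU overflow flag in inline assembly):

#include <stdbool.h>

/* Illustration only: detect signed overflow and refuse the update,
 * mirroring what the jno/undo/int $4 sequences above achieve. */
static bool checked_inc64(long long *counter)
{
	long long next;

	if (__builtin_add_overflow(*counter, 1LL, &next))
		return false;	/* would wrap: leave the counter untouched */
	*counter = next;
	return true;
}
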
7877 diff -urNp linux-2.6.32.42/arch/x86/include/asm/bitops.h linux-2.6.32.42/arch/x86/include/asm/bitops.h
7878 --- linux-2.6.32.42/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
7879 +++ linux-2.6.32.42/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
7880 @@ -38,7 +38,7 @@
7881 * a mask operation on a byte.
7882 */
7883 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
7884 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
7885 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
7886 #define CONST_MASK(nr) (1 << ((nr) & 7))
7887
7888 /**
7889 diff -urNp linux-2.6.32.42/arch/x86/include/asm/boot.h linux-2.6.32.42/arch/x86/include/asm/boot.h
7890 --- linux-2.6.32.42/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
7891 +++ linux-2.6.32.42/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
7892 @@ -11,10 +11,15 @@
7893 #include <asm/pgtable_types.h>
7894
7895 /* Physical address where kernel should be loaded. */
7896 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7897 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
7898 + (CONFIG_PHYSICAL_ALIGN - 1)) \
7899 & ~(CONFIG_PHYSICAL_ALIGN - 1))
7900
7901 +#ifndef __ASSEMBLY__
7902 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
7903 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
7904 +#endif
7905 +
7906 /* Minimum kernel alignment, as a power of two */
7907 #ifdef CONFIG_X86_64
7908 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
7909 diff -urNp linux-2.6.32.42/arch/x86/include/asm/cacheflush.h linux-2.6.32.42/arch/x86/include/asm/cacheflush.h
7910 --- linux-2.6.32.42/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
7911 +++ linux-2.6.32.42/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
7912 @@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
7913 static inline unsigned long get_page_memtype(struct page *pg)
7914 {
7915 if (!PageUncached(pg) && !PageWC(pg))
7916 - return -1;
7917 + return ~0UL;
7918 else if (!PageUncached(pg) && PageWC(pg))
7919 return _PAGE_CACHE_WC;
7920 else if (PageUncached(pg) && !PageWC(pg))
7921 @@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
7922 SetPageWC(pg);
7923 break;
7924 default:
7925 - case -1:
7926 + case ~0UL:
7927 ClearPageUncached(pg);
7928 ClearPageWC(pg);
7929 break;
7930 diff -urNp linux-2.6.32.42/arch/x86/include/asm/cache.h linux-2.6.32.42/arch/x86/include/asm/cache.h
7931 --- linux-2.6.32.42/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
7932 +++ linux-2.6.32.42/arch/x86/include/asm/cache.h 2011-05-04 17:56:20.000000000 -0400
7933 @@ -5,9 +5,10 @@
7934
7935 /* L1 cache line size */
7936 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
7937 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7938 +#define L1_CACHE_BYTES (_AC(1,U) << L1_CACHE_SHIFT)
7939
7940 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
7941 +#define __read_only __attribute__((__section__(".data.read_only")))
7942
7943 #ifdef CONFIG_X86_VSMP
7944 /* vSMP Internode cacheline shift */
7945 diff -urNp linux-2.6.32.42/arch/x86/include/asm/checksum_32.h linux-2.6.32.42/arch/x86/include/asm/checksum_32.h
7946 --- linux-2.6.32.42/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
7947 +++ linux-2.6.32.42/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
7948 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
7949 int len, __wsum sum,
7950 int *src_err_ptr, int *dst_err_ptr);
7951
7952 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
7953 + int len, __wsum sum,
7954 + int *src_err_ptr, int *dst_err_ptr);
7955 +
7956 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
7957 + int len, __wsum sum,
7958 + int *src_err_ptr, int *dst_err_ptr);
7959 +
7960 /*
7961 * Note: when you get a NULL pointer exception here this means someone
7962 * passed in an incorrect kernel address to one of these functions.
7963 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
7964 int *err_ptr)
7965 {
7966 might_sleep();
7967 - return csum_partial_copy_generic((__force void *)src, dst,
7968 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
7969 len, sum, err_ptr, NULL);
7970 }
7971
7972 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
7973 {
7974 might_sleep();
7975 if (access_ok(VERIFY_WRITE, dst, len))
7976 - return csum_partial_copy_generic(src, (__force void *)dst,
7977 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
7978 len, sum, NULL, err_ptr);
7979
7980 if (len)
7981 diff -urNp linux-2.6.32.42/arch/x86/include/asm/desc_defs.h linux-2.6.32.42/arch/x86/include/asm/desc_defs.h
7982 --- linux-2.6.32.42/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
7983 +++ linux-2.6.32.42/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
7984 @@ -31,6 +31,12 @@ struct desc_struct {
7985 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
7986 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
7987 };
7988 + struct {
7989 + u16 offset_low;
7990 + u16 seg;
7991 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
7992 + unsigned offset_high: 16;
7993 + } gate;
7994 };
7995 } __attribute__((packed));
7996
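
The anonymous gate struct added to desc_struct above gives the 32-bit gate fields addressable names; the pack_gate() rewrite in the desc.h hunk below switches from the raw a/b words to exactly these fields. A quick layout check compiled outside the kernel (only the field order is taken from the hunk; the type name and the assertion are illustrative):

#include <stdint.h>

struct gate_view {
	uint16_t offset_low;
	uint16_t seg;
	unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
	unsigned offset_high : 16;
} __attribute__((packed));

/* A 32-bit gate descriptor occupies exactly 8 bytes. */
_Static_assert(sizeof(struct gate_view) == 8, "32-bit gate is 8 bytes");
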
7997 diff -urNp linux-2.6.32.42/arch/x86/include/asm/desc.h linux-2.6.32.42/arch/x86/include/asm/desc.h
7998 --- linux-2.6.32.42/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
7999 +++ linux-2.6.32.42/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8000 @@ -4,6 +4,7 @@
8001 #include <asm/desc_defs.h>
8002 #include <asm/ldt.h>
8003 #include <asm/mmu.h>
8004 +#include <asm/pgtable.h>
8005 #include <linux/smp.h>
8006
8007 static inline void fill_ldt(struct desc_struct *desc,
8008 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8009 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8010 desc->type = (info->read_exec_only ^ 1) << 1;
8011 desc->type |= info->contents << 2;
8012 + desc->type |= info->seg_not_present ^ 1;
8013 desc->s = 1;
8014 desc->dpl = 0x3;
8015 desc->p = info->seg_not_present ^ 1;
8016 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8017 }
8018
8019 extern struct desc_ptr idt_descr;
8020 -extern gate_desc idt_table[];
8021 -
8022 -struct gdt_page {
8023 - struct desc_struct gdt[GDT_ENTRIES];
8024 -} __attribute__((aligned(PAGE_SIZE)));
8025 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8026 +extern gate_desc idt_table[256];
8027
8028 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8029 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8030 {
8031 - return per_cpu(gdt_page, cpu).gdt;
8032 + return cpu_gdt_table[cpu];
8033 }
8034
8035 #ifdef CONFIG_X86_64
8036 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8037 unsigned long base, unsigned dpl, unsigned flags,
8038 unsigned short seg)
8039 {
8040 - gate->a = (seg << 16) | (base & 0xffff);
8041 - gate->b = (base & 0xffff0000) |
8042 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8043 + gate->gate.offset_low = base;
8044 + gate->gate.seg = seg;
8045 + gate->gate.reserved = 0;
8046 + gate->gate.type = type;
8047 + gate->gate.s = 0;
8048 + gate->gate.dpl = dpl;
8049 + gate->gate.p = 1;
8050 + gate->gate.offset_high = base >> 16;
8051 }
8052
8053 #endif
8054 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8055 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8056 const gate_desc *gate)
8057 {
8058 + pax_open_kernel();
8059 memcpy(&idt[entry], gate, sizeof(*gate));
8060 + pax_close_kernel();
8061 }
8062
8063 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8064 const void *desc)
8065 {
8066 + pax_open_kernel();
8067 memcpy(&ldt[entry], desc, 8);
8068 + pax_close_kernel();
8069 }
8070
8071 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8072 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8073 size = sizeof(struct desc_struct);
8074 break;
8075 }
8076 +
8077 + pax_open_kernel();
8078 memcpy(&gdt[entry], desc, size);
8079 + pax_close_kernel();
8080 }
8081
8082 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8083 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8084
8085 static inline void native_load_tr_desc(void)
8086 {
8087 + pax_open_kernel();
8088 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8089 + pax_close_kernel();
8090 }
8091
8092 static inline void native_load_gdt(const struct desc_ptr *dtr)
8093 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8094 unsigned int i;
8095 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8096
8097 + pax_open_kernel();
8098 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8099 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8100 + pax_close_kernel();
8101 }
8102
8103 #define _LDT_empty(info) \
8104 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8105 desc->limit = (limit >> 16) & 0xf;
8106 }
8107
8108 -static inline void _set_gate(int gate, unsigned type, void *addr,
8109 +static inline void _set_gate(int gate, unsigned type, const void *addr,
8110 unsigned dpl, unsigned ist, unsigned seg)
8111 {
8112 gate_desc s;
8113 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8114 * Pentium F0 0F bugfix can have resulted in the mapped
8115 * IDT being write-protected.
8116 */
8117 -static inline void set_intr_gate(unsigned int n, void *addr)
8118 +static inline void set_intr_gate(unsigned int n, const void *addr)
8119 {
8120 BUG_ON((unsigned)n > 0xFF);
8121 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8122 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8123 /*
8124 * This routine sets up an interrupt gate at directory privilege level 3.
8125 */
8126 -static inline void set_system_intr_gate(unsigned int n, void *addr)
8127 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
8128 {
8129 BUG_ON((unsigned)n > 0xFF);
8130 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8131 }
8132
8133 -static inline void set_system_trap_gate(unsigned int n, void *addr)
8134 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
8135 {
8136 BUG_ON((unsigned)n > 0xFF);
8137 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8138 }
8139
8140 -static inline void set_trap_gate(unsigned int n, void *addr)
8141 +static inline void set_trap_gate(unsigned int n, const void *addr)
8142 {
8143 BUG_ON((unsigned)n > 0xFF);
8144 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8145 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8146 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8147 {
8148 BUG_ON((unsigned)n > 0xFF);
8149 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8150 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8151 }
8152
8153 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8154 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8155 {
8156 BUG_ON((unsigned)n > 0xFF);
8157 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8158 }
8159
8160 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8161 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8162 {
8163 BUG_ON((unsigned)n > 0xFF);
8164 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8165 }
8166
8167 +#ifdef CONFIG_X86_32
8168 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8169 +{
8170 + struct desc_struct d;
8171 +
8172 + if (likely(limit))
8173 + limit = (limit - 1UL) >> PAGE_SHIFT;
8174 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
8175 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8176 +}
8177 +#endif
8178 +
8179 #endif /* _ASM_X86_DESC_H */
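
The set_user_cs() helper added above reloads GDT_ENTRY_DEFAULT_USER_CS with a per-mm base and limit (the switch_mm() hunks further down call it with next->context.user_cs_base/limit); its limit argument is a byte count that first becomes an inclusive, page-granular segment limit. A small sketch of just that conversion, with PAGE_SHIFT of 12 assumed as on x86 and a made-up function name:

#define PAGE_SHIFT 12

/* byte limit -> inclusive 4 KiB-granular limit, as in set_user_cs() above */
static unsigned long cs_limit_in_pages(unsigned long bytes)
{
	if (bytes)
		bytes = (bytes - 1UL) >> PAGE_SHIFT;
	return bytes;		/* e.g. 0x0C000000 bytes -> 0xBFFF */
}
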
8180 diff -urNp linux-2.6.32.42/arch/x86/include/asm/device.h linux-2.6.32.42/arch/x86/include/asm/device.h
8181 --- linux-2.6.32.42/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8182 +++ linux-2.6.32.42/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8183 @@ -6,7 +6,7 @@ struct dev_archdata {
8184 void *acpi_handle;
8185 #endif
8186 #ifdef CONFIG_X86_64
8187 -struct dma_map_ops *dma_ops;
8188 + const struct dma_map_ops *dma_ops;
8189 #endif
8190 #ifdef CONFIG_DMAR
8191 void *iommu; /* hook for IOMMU specific extension */
8192 diff -urNp linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h
8193 --- linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8194 +++ linux-2.6.32.42/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8195 @@ -25,9 +25,9 @@ extern int iommu_merge;
8196 extern struct device x86_dma_fallback_dev;
8197 extern int panic_on_overflow;
8198
8199 -extern struct dma_map_ops *dma_ops;
8200 +extern const struct dma_map_ops *dma_ops;
8201
8202 -static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8203 +static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8204 {
8205 #ifdef CONFIG_X86_32
8206 return dma_ops;
8207 @@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8208 /* Make sure we keep the same behaviour */
8209 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8210 {
8211 - struct dma_map_ops *ops = get_dma_ops(dev);
8212 + const struct dma_map_ops *ops = get_dma_ops(dev);
8213 if (ops->mapping_error)
8214 return ops->mapping_error(dev, dma_addr);
8215
8216 @@ -122,7 +122,7 @@ static inline void *
8217 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8218 gfp_t gfp)
8219 {
8220 - struct dma_map_ops *ops = get_dma_ops(dev);
8221 + const struct dma_map_ops *ops = get_dma_ops(dev);
8222 void *memory;
8223
8224 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8225 @@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8226 static inline void dma_free_coherent(struct device *dev, size_t size,
8227 void *vaddr, dma_addr_t bus)
8228 {
8229 - struct dma_map_ops *ops = get_dma_ops(dev);
8230 + const struct dma_map_ops *ops = get_dma_ops(dev);
8231
8232 WARN_ON(irqs_disabled()); /* for portability */
8233
8234 diff -urNp linux-2.6.32.42/arch/x86/include/asm/e820.h linux-2.6.32.42/arch/x86/include/asm/e820.h
8235 --- linux-2.6.32.42/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8236 +++ linux-2.6.32.42/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8237 @@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8238 #define ISA_END_ADDRESS 0x100000
8239 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8240
8241 -#define BIOS_BEGIN 0x000a0000
8242 +#define BIOS_BEGIN 0x000c0000
8243 #define BIOS_END 0x00100000
8244
8245 #ifdef __KERNEL__
8246 diff -urNp linux-2.6.32.42/arch/x86/include/asm/elf.h linux-2.6.32.42/arch/x86/include/asm/elf.h
8247 --- linux-2.6.32.42/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8248 +++ linux-2.6.32.42/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8249 @@ -257,7 +257,25 @@ extern int force_personality32;
8250 the loader. We need to make sure that it is out of the way of the program
8251 that it will "exec", and that there is sufficient room for the brk. */
8252
8253 +#ifdef CONFIG_PAX_SEGMEXEC
8254 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8255 +#else
8256 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8257 +#endif
8258 +
8259 +#ifdef CONFIG_PAX_ASLR
8260 +#ifdef CONFIG_X86_32
8261 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8262 +
8263 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8264 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8265 +#else
8266 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
8267 +
8268 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8269 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8270 +#endif
8271 +#endif
8272
8273 /* This yields a mask that user programs can use to figure out what
8274 instruction set this CPU supports. This could be done in user space,
8275 @@ -311,8 +329,7 @@ do { \
8276 #define ARCH_DLINFO \
8277 do { \
8278 if (vdso_enabled) \
8279 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8280 - (unsigned long)current->mm->context.vdso); \
8281 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8282 } while (0)
8283
8284 #define AT_SYSINFO 32
8285 @@ -323,7 +340,7 @@ do { \
8286
8287 #endif /* !CONFIG_X86_32 */
8288
8289 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8290 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8291
8292 #define VDSO_ENTRY \
8293 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8294 @@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8295 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8296 #define compat_arch_setup_additional_pages syscall32_setup_pages
8297
8298 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8299 -#define arch_randomize_brk arch_randomize_brk
8300 -
8301 #endif /* _ASM_X86_ELF_H */
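
The PAX_DELTA_*_LEN values above are bit counts: they give, in pages, the number of random bits mixed into the mmap and stack bases when PAX_ASLR is active. A worked example for the i386, non-SEGMEXEC case shown in the hunk, assuming 4 KiB pages:

	PAX_DELTA_MMAP_LEN = 16
	randomised span    = 2^16 pages x 4 KiB/page = 2^28 bytes = 256 MiB

so the mmap base can land anywhere inside a 256 MiB window; the stack delta works the same way with its own bit count.
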
8302 diff -urNp linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h
8303 --- linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8304 +++ linux-2.6.32.42/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8305 @@ -15,6 +15,6 @@ enum reboot_type {
8306
8307 extern enum reboot_type reboot_type;
8308
8309 -extern void machine_emergency_restart(void);
8310 +extern void machine_emergency_restart(void) __noreturn;
8311
8312 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8313 diff -urNp linux-2.6.32.42/arch/x86/include/asm/futex.h linux-2.6.32.42/arch/x86/include/asm/futex.h
8314 --- linux-2.6.32.42/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8315 +++ linux-2.6.32.42/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8316 @@ -12,16 +12,18 @@
8317 #include <asm/system.h>
8318
8319 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8320 + typecheck(u32 *, uaddr); \
8321 asm volatile("1:\t" insn "\n" \
8322 "2:\t.section .fixup,\"ax\"\n" \
8323 "3:\tmov\t%3, %1\n" \
8324 "\tjmp\t2b\n" \
8325 "\t.previous\n" \
8326 _ASM_EXTABLE(1b, 3b) \
8327 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8328 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8329 : "i" (-EFAULT), "0" (oparg), "1" (0))
8330
8331 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8332 + typecheck(u32 *, uaddr); \
8333 asm volatile("1:\tmovl %2, %0\n" \
8334 "\tmovl\t%0, %3\n" \
8335 "\t" insn "\n" \
8336 @@ -34,10 +36,10 @@
8337 _ASM_EXTABLE(1b, 4b) \
8338 _ASM_EXTABLE(2b, 4b) \
8339 : "=&a" (oldval), "=&r" (ret), \
8340 - "+m" (*uaddr), "=&r" (tem) \
8341 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8342 : "r" (oparg), "i" (-EFAULT), "1" (0))
8343
8344 -static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8345 +static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8346 {
8347 int op = (encoded_op >> 28) & 7;
8348 int cmp = (encoded_op >> 24) & 15;
8349 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8350
8351 switch (op) {
8352 case FUTEX_OP_SET:
8353 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8354 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8355 break;
8356 case FUTEX_OP_ADD:
8357 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8358 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8359 uaddr, oparg);
8360 break;
8361 case FUTEX_OP_OR:
8362 @@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8363 return ret;
8364 }
8365
8366 -static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8367 +static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8368 int newval)
8369 {
8370
8371 @@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8372 return -ENOSYS;
8373 #endif
8374
8375 - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8376 + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8377 return -EFAULT;
8378
8379 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8380 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8381 "2:\t.section .fixup, \"ax\"\n"
8382 "3:\tmov %2, %0\n"
8383 "\tjmp 2b\n"
8384 "\t.previous\n"
8385 _ASM_EXTABLE(1b, 3b)
8386 - : "=a" (oldval), "+m" (*uaddr)
8387 + : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8388 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8389 : "memory"
8390 );
8391 diff -urNp linux-2.6.32.42/arch/x86/include/asm/hw_irq.h linux-2.6.32.42/arch/x86/include/asm/hw_irq.h
8392 --- linux-2.6.32.42/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8393 +++ linux-2.6.32.42/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8394 @@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8395 extern void enable_IO_APIC(void);
8396
8397 /* Statistics */
8398 -extern atomic_t irq_err_count;
8399 -extern atomic_t irq_mis_count;
8400 +extern atomic_unchecked_t irq_err_count;
8401 +extern atomic_unchecked_t irq_mis_count;
8402
8403 /* EISA */
8404 extern void eisa_set_level_irq(unsigned int irq);
8405 diff -urNp linux-2.6.32.42/arch/x86/include/asm/i387.h linux-2.6.32.42/arch/x86/include/asm/i387.h
8406 --- linux-2.6.32.42/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8407 +++ linux-2.6.32.42/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8408 @@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8409 {
8410 int err;
8411
8412 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8413 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8414 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8415 +#endif
8416 +
8417 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8418 "2:\n"
8419 ".section .fixup,\"ax\"\n"
8420 @@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8421 {
8422 int err;
8423
8424 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8425 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8426 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8427 +#endif
8428 +
8429 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8430 "2:\n"
8431 ".section .fixup,\"ax\"\n"
8432 @@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8433 }
8434
8435 /* We need a safe address that is cheap to find and that is already
8436 - in L1 during context switch. The best choices are unfortunately
8437 - different for UP and SMP */
8438 -#ifdef CONFIG_SMP
8439 -#define safe_address (__per_cpu_offset[0])
8440 -#else
8441 -#define safe_address (kstat_cpu(0).cpustat.user)
8442 -#endif
8443 + in L1 during context switch. */
8444 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8445
8446 /*
8447 * These must be called with preempt disabled
8448 @@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8449 struct thread_info *me = current_thread_info();
8450 preempt_disable();
8451 if (me->status & TS_USEDFPU)
8452 - __save_init_fpu(me->task);
8453 + __save_init_fpu(current);
8454 else
8455 clts();
8456 }
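
The fxsave/fxrstor hunks above add PAX_USER_SHADOW_BASE to a pointer that lies below that base, so the rex64-prefixed access goes through the shadow mapping UDEREF sets up for user space. A stripped-down sketch of that rebasing step; the constant below is a placeholder, not the kernel's actual PAX_USER_SHADOW_BASE (which is defined elsewhere in this patch and configuration dependent):

/* Illustration only. */
#define SHADOW_BASE 0xffff880000000000UL	/* placeholder value */

static inline void *shadow_rebase(void *uptr)
{
	if ((unsigned long)uptr < SHADOW_BASE)
		return (char *)uptr + SHADOW_BASE;
	return uptr;	/* already at or above the shadow base */
}
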
8457 diff -urNp linux-2.6.32.42/arch/x86/include/asm/io_32.h linux-2.6.32.42/arch/x86/include/asm/io_32.h
8458 --- linux-2.6.32.42/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8459 +++ linux-2.6.32.42/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8460 @@ -3,6 +3,7 @@
8461
8462 #include <linux/string.h>
8463 #include <linux/compiler.h>
8464 +#include <asm/processor.h>
8465
8466 /*
8467 * This file contains the definitions for the x86 IO instructions
8468 @@ -42,6 +43,17 @@
8469
8470 #ifdef __KERNEL__
8471
8472 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8473 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8474 +{
8475 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8476 +}
8477 +
8478 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8479 +{
8480 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8481 +}
8482 +
8483 #include <asm-generic/iomap.h>
8484
8485 #include <linux/vmalloc.h>
8486 diff -urNp linux-2.6.32.42/arch/x86/include/asm/io_64.h linux-2.6.32.42/arch/x86/include/asm/io_64.h
8487 --- linux-2.6.32.42/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8488 +++ linux-2.6.32.42/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8489 @@ -140,6 +140,17 @@ __OUTS(l)
8490
8491 #include <linux/vmalloc.h>
8492
8493 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8494 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8495 +{
8496 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8497 +}
8498 +
8499 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8500 +{
8501 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8502 +}
8503 +
8504 #include <asm-generic/iomap.h>
8505
8506 void __memcpy_fromio(void *, unsigned long, unsigned);
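
io_32.h and io_64.h above both gain the same valid_phys_addr_range()/valid_mmap_phys_addr_range() helpers, which cap physical accesses (e.g. through /dev/mem) at the CPU's reported physical address width. A worked example of the bound, assuming boot_cpu_data.x86_phys_bits == 36 and 4 KiB pages:

	allowed page frames  = 1 << (36 - 12) = 0x1000000 frames
	highest allowed byte = 0x1000000 x 4 KiB = 64 GiB

so any request whose last page frame falls at or beyond the 64 GiB mark is rejected.
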
8507 diff -urNp linux-2.6.32.42/arch/x86/include/asm/iommu.h linux-2.6.32.42/arch/x86/include/asm/iommu.h
8508 --- linux-2.6.32.42/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8509 +++ linux-2.6.32.42/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8510 @@ -3,7 +3,7 @@
8511
8512 extern void pci_iommu_shutdown(void);
8513 extern void no_iommu_init(void);
8514 -extern struct dma_map_ops nommu_dma_ops;
8515 +extern const struct dma_map_ops nommu_dma_ops;
8516 extern int force_iommu, no_iommu;
8517 extern int iommu_detected;
8518 extern int iommu_pass_through;
8519 diff -urNp linux-2.6.32.42/arch/x86/include/asm/irqflags.h linux-2.6.32.42/arch/x86/include/asm/irqflags.h
8520 --- linux-2.6.32.42/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8521 +++ linux-2.6.32.42/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8522 @@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8523 sti; \
8524 sysexit
8525
8526 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
8527 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8528 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
8529 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8530 +
8531 #else
8532 #define INTERRUPT_RETURN iret
8533 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8534 diff -urNp linux-2.6.32.42/arch/x86/include/asm/kprobes.h linux-2.6.32.42/arch/x86/include/asm/kprobes.h
8535 --- linux-2.6.32.42/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8536 +++ linux-2.6.32.42/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8537 @@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8538 #define BREAKPOINT_INSTRUCTION 0xcc
8539 #define RELATIVEJUMP_INSTRUCTION 0xe9
8540 #define MAX_INSN_SIZE 16
8541 -#define MAX_STACK_SIZE 64
8542 -#define MIN_STACK_SIZE(ADDR) \
8543 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8544 - THREAD_SIZE - (unsigned long)(ADDR))) \
8545 - ? (MAX_STACK_SIZE) \
8546 - : (((unsigned long)current_thread_info()) + \
8547 - THREAD_SIZE - (unsigned long)(ADDR)))
8548 +#define MAX_STACK_SIZE 64UL
8549 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8550
8551 #define flush_insn_slot(p) do { } while (0)
8552
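
The rewritten MIN_STACK_SIZE() above is simply min(64, current->thread.sp0 - ADDR): copy at most 64 bytes of stack, and never past the top of the kernel stack. Two worked cases:

	sp0 - ADDR = 40    ->  MIN_STACK_SIZE(ADDR) = min(64, 40)   = 40 bytes
	sp0 - ADDR = 4096  ->  MIN_STACK_SIZE(ADDR) = min(64, 4096) = 64 bytes
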
8553 diff -urNp linux-2.6.32.42/arch/x86/include/asm/kvm_host.h linux-2.6.32.42/arch/x86/include/asm/kvm_host.h
8554 --- linux-2.6.32.42/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8555 +++ linux-2.6.32.42/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8556 @@ -536,7 +536,7 @@ struct kvm_x86_ops {
8557 const struct trace_print_flags *exit_reasons_str;
8558 };
8559
8560 -extern struct kvm_x86_ops *kvm_x86_ops;
8561 +extern const struct kvm_x86_ops *kvm_x86_ops;
8562
8563 int kvm_mmu_module_init(void);
8564 void kvm_mmu_module_exit(void);
8565 diff -urNp linux-2.6.32.42/arch/x86/include/asm/local.h linux-2.6.32.42/arch/x86/include/asm/local.h
8566 --- linux-2.6.32.42/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8567 +++ linux-2.6.32.42/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8568 @@ -18,26 +18,58 @@ typedef struct {
8569
8570 static inline void local_inc(local_t *l)
8571 {
8572 - asm volatile(_ASM_INC "%0"
8573 + asm volatile(_ASM_INC "%0\n"
8574 +
8575 +#ifdef CONFIG_PAX_REFCOUNT
8576 + "jno 0f\n"
8577 + _ASM_DEC "%0\n"
8578 + "int $4\n0:\n"
8579 + _ASM_EXTABLE(0b, 0b)
8580 +#endif
8581 +
8582 : "+m" (l->a.counter));
8583 }
8584
8585 static inline void local_dec(local_t *l)
8586 {
8587 - asm volatile(_ASM_DEC "%0"
8588 + asm volatile(_ASM_DEC "%0\n"
8589 +
8590 +#ifdef CONFIG_PAX_REFCOUNT
8591 + "jno 0f\n"
8592 + _ASM_INC "%0\n"
8593 + "int $4\n0:\n"
8594 + _ASM_EXTABLE(0b, 0b)
8595 +#endif
8596 +
8597 : "+m" (l->a.counter));
8598 }
8599
8600 static inline void local_add(long i, local_t *l)
8601 {
8602 - asm volatile(_ASM_ADD "%1,%0"
8603 + asm volatile(_ASM_ADD "%1,%0\n"
8604 +
8605 +#ifdef CONFIG_PAX_REFCOUNT
8606 + "jno 0f\n"
8607 + _ASM_SUB "%1,%0\n"
8608 + "int $4\n0:\n"
8609 + _ASM_EXTABLE(0b, 0b)
8610 +#endif
8611 +
8612 : "+m" (l->a.counter)
8613 : "ir" (i));
8614 }
8615
8616 static inline void local_sub(long i, local_t *l)
8617 {
8618 - asm volatile(_ASM_SUB "%1,%0"
8619 + asm volatile(_ASM_SUB "%1,%0\n"
8620 +
8621 +#ifdef CONFIG_PAX_REFCOUNT
8622 + "jno 0f\n"
8623 + _ASM_ADD "%1,%0\n"
8624 + "int $4\n0:\n"
8625 + _ASM_EXTABLE(0b, 0b)
8626 +#endif
8627 +
8628 : "+m" (l->a.counter)
8629 : "ir" (i));
8630 }
8631 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8632 {
8633 unsigned char c;
8634
8635 - asm volatile(_ASM_SUB "%2,%0; sete %1"
8636 + asm volatile(_ASM_SUB "%2,%0\n"
8637 +
8638 +#ifdef CONFIG_PAX_REFCOUNT
8639 + "jno 0f\n"
8640 + _ASM_ADD "%2,%0\n"
8641 + "int $4\n0:\n"
8642 + _ASM_EXTABLE(0b, 0b)
8643 +#endif
8644 +
8645 + "sete %1\n"
8646 : "+m" (l->a.counter), "=qm" (c)
8647 : "ir" (i) : "memory");
8648 return c;
8649 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8650 {
8651 unsigned char c;
8652
8653 - asm volatile(_ASM_DEC "%0; sete %1"
8654 + asm volatile(_ASM_DEC "%0\n"
8655 +
8656 +#ifdef CONFIG_PAX_REFCOUNT
8657 + "jno 0f\n"
8658 + _ASM_INC "%0\n"
8659 + "int $4\n0:\n"
8660 + _ASM_EXTABLE(0b, 0b)
8661 +#endif
8662 +
8663 + "sete %1\n"
8664 : "+m" (l->a.counter), "=qm" (c)
8665 : : "memory");
8666 return c != 0;
8667 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8668 {
8669 unsigned char c;
8670
8671 - asm volatile(_ASM_INC "%0; sete %1"
8672 + asm volatile(_ASM_INC "%0\n"
8673 +
8674 +#ifdef CONFIG_PAX_REFCOUNT
8675 + "jno 0f\n"
8676 + _ASM_DEC "%0\n"
8677 + "int $4\n0:\n"
8678 + _ASM_EXTABLE(0b, 0b)
8679 +#endif
8680 +
8681 + "sete %1\n"
8682 : "+m" (l->a.counter), "=qm" (c)
8683 : : "memory");
8684 return c != 0;
8685 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8686 {
8687 unsigned char c;
8688
8689 - asm volatile(_ASM_ADD "%2,%0; sets %1"
8690 + asm volatile(_ASM_ADD "%2,%0\n"
8691 +
8692 +#ifdef CONFIG_PAX_REFCOUNT
8693 + "jno 0f\n"
8694 + _ASM_SUB "%2,%0\n"
8695 + "int $4\n0:\n"
8696 + _ASM_EXTABLE(0b, 0b)
8697 +#endif
8698 +
8699 + "sets %1\n"
8700 : "+m" (l->a.counter), "=qm" (c)
8701 : "ir" (i) : "memory");
8702 return c;
8703 @@ -133,7 +201,15 @@ static inline long local_add_return(long
8704 #endif
8705 /* Modern 486+ processor */
8706 __i = i;
8707 - asm volatile(_ASM_XADD "%0, %1;"
8708 + asm volatile(_ASM_XADD "%0, %1\n"
8709 +
8710 +#ifdef CONFIG_PAX_REFCOUNT
8711 + "jno 0f\n"
8712 + _ASM_MOV "%0,%1\n"
8713 + "int $4\n0:\n"
8714 + _ASM_EXTABLE(0b, 0b)
8715 +#endif
8716 +
8717 : "+r" (i), "+m" (l->a.counter)
8718 : : "memory");
8719 return i + __i;
8720 diff -urNp linux-2.6.32.42/arch/x86/include/asm/microcode.h linux-2.6.32.42/arch/x86/include/asm/microcode.h
8721 --- linux-2.6.32.42/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
8722 +++ linux-2.6.32.42/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
8723 @@ -12,13 +12,13 @@ struct device;
8724 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
8725
8726 struct microcode_ops {
8727 - enum ucode_state (*request_microcode_user) (int cpu,
8728 + enum ucode_state (* const request_microcode_user) (int cpu,
8729 const void __user *buf, size_t size);
8730
8731 - enum ucode_state (*request_microcode_fw) (int cpu,
8732 + enum ucode_state (* const request_microcode_fw) (int cpu,
8733 struct device *device);
8734
8735 - void (*microcode_fini_cpu) (int cpu);
8736 + void (* const microcode_fini_cpu) (int cpu);
8737
8738 /*
8739 * The generic 'microcode_core' part guarantees that
8740 @@ -38,18 +38,18 @@ struct ucode_cpu_info {
8741 extern struct ucode_cpu_info ucode_cpu_info[];
8742
8743 #ifdef CONFIG_MICROCODE_INTEL
8744 -extern struct microcode_ops * __init init_intel_microcode(void);
8745 +extern const struct microcode_ops * __init init_intel_microcode(void);
8746 #else
8747 -static inline struct microcode_ops * __init init_intel_microcode(void)
8748 +static inline const struct microcode_ops * __init init_intel_microcode(void)
8749 {
8750 return NULL;
8751 }
8752 #endif /* CONFIG_MICROCODE_INTEL */
8753
8754 #ifdef CONFIG_MICROCODE_AMD
8755 -extern struct microcode_ops * __init init_amd_microcode(void);
8756 +extern const struct microcode_ops * __init init_amd_microcode(void);
8757 #else
8758 -static inline struct microcode_ops * __init init_amd_microcode(void)
8759 +static inline const struct microcode_ops * __init init_amd_microcode(void)
8760 {
8761 return NULL;
8762 }
8763 diff -urNp linux-2.6.32.42/arch/x86/include/asm/mman.h linux-2.6.32.42/arch/x86/include/asm/mman.h
8764 --- linux-2.6.32.42/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
8765 +++ linux-2.6.32.42/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
8766 @@ -5,4 +5,14 @@
8767
8768 #include <asm-generic/mman.h>
8769
8770 +#ifdef __KERNEL__
8771 +#ifndef __ASSEMBLY__
8772 +#ifdef CONFIG_X86_32
8773 +#define arch_mmap_check i386_mmap_check
8774 +int i386_mmap_check(unsigned long addr, unsigned long len,
8775 + unsigned long flags);
8776 +#endif
8777 +#endif
8778 +#endif
8779 +
8780 #endif /* _ASM_X86_MMAN_H */
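
arch_mmap_check is a pre-existing arch hook in the mmap path; the hunk above merely points it at a PaX-specific checker on 32-bit. As a hypothetical stub only (the real i386_mmap_check lives elsewhere in this patch and enforces the PaX address-space limits), the shape of such a hook is:

/* Hypothetical stub: return 0 to allow the mapping, a negative errno
 * to make the mmap fail before any address-space work is done. */
int i386_mmap_check(unsigned long addr, unsigned long len,
		    unsigned long flags)
{
	(void)addr; (void)len; (void)flags;
	return 0;
}
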
8781 diff -urNp linux-2.6.32.42/arch/x86/include/asm/mmu_context.h linux-2.6.32.42/arch/x86/include/asm/mmu_context.h
8782 --- linux-2.6.32.42/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
8783 +++ linux-2.6.32.42/arch/x86/include/asm/mmu_context.h 2011-04-17 15:56:46.000000000 -0400
8784 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
8785
8786 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8787 {
8788 +
8789 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8790 + unsigned int i;
8791 + pgd_t *pgd;
8792 +
8793 + pax_open_kernel();
8794 + pgd = get_cpu_pgd(smp_processor_id());
8795 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
8796 + if (paravirt_enabled())
8797 + set_pgd(pgd+i, native_make_pgd(0));
8798 + else
8799 + pgd[i] = native_make_pgd(0);
8800 + pax_close_kernel();
8801 +#endif
8802 +
8803 #ifdef CONFIG_SMP
8804 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
8805 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
8806 @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
8807 struct task_struct *tsk)
8808 {
8809 unsigned cpu = smp_processor_id();
8810 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
8811 + int tlbstate = TLBSTATE_OK;
8812 +#endif
8813
8814 if (likely(prev != next)) {
8815 #ifdef CONFIG_SMP
8816 +#ifdef CONFIG_X86_32
8817 + tlbstate = percpu_read(cpu_tlbstate.state);
8818 +#endif
8819 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8820 percpu_write(cpu_tlbstate.active_mm, next);
8821 #endif
8822 cpumask_set_cpu(cpu, mm_cpumask(next));
8823
8824 /* Re-load page tables */
8825 +#ifdef CONFIG_PAX_PER_CPU_PGD
8826 + pax_open_kernel();
8827 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8828 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8829 + pax_close_kernel();
8830 + load_cr3(get_cpu_pgd(cpu));
8831 +#else
8832 load_cr3(next->pgd);
8833 +#endif
8834
8835 /* stop flush ipis for the previous mm */
8836 cpumask_clear_cpu(cpu, mm_cpumask(prev));
8837 @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
8838 */
8839 if (unlikely(prev->context.ldt != next->context.ldt))
8840 load_LDT_nolock(&next->context);
8841 - }
8842 +
8843 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8844 + if (!nx_enabled) {
8845 + smp_mb__before_clear_bit();
8846 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
8847 + smp_mb__after_clear_bit();
8848 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8849 + }
8850 +#endif
8851 +
8852 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8853 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
8854 + prev->context.user_cs_limit != next->context.user_cs_limit))
8855 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8856 #ifdef CONFIG_SMP
8857 + else if (unlikely(tlbstate != TLBSTATE_OK))
8858 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8859 +#endif
8860 +#endif
8861 +
8862 + }
8863 else {
8864 +
8865 +#ifdef CONFIG_PAX_PER_CPU_PGD
8866 + pax_open_kernel();
8867 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
8868 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
8869 + pax_close_kernel();
8870 + load_cr3(get_cpu_pgd(cpu));
8871 +#endif
8872 +
8873 +#ifdef CONFIG_SMP
8874 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
8875 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
8876
8877 @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
8878 * tlb flush IPI delivery. We must reload CR3
8879 * to make sure to use no freed page tables.
8880 */
8881 +
8882 +#ifndef CONFIG_PAX_PER_CPU_PGD
8883 load_cr3(next->pgd);
8884 +#endif
8885 +
8886 load_LDT_nolock(&next->context);
8887 +
8888 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
8889 + if (!nx_enabled)
8890 + cpu_set(cpu, next->context.cpu_user_cs_mask);
8891 +#endif
8892 +
8893 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
8894 +#ifdef CONFIG_PAX_PAGEEXEC
8895 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
8896 +#endif
8897 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
8898 +#endif
8899 +
8900 }
8901 - }
8902 #endif
8903 + }
8904 }
8905
8906 #define activate_mm(prev, next) \
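
Under CONFIG_PAX_PER_CPU_PGD, the switch_mm() changes above no longer load next->pgd into CR3 directly; each CPU keeps a private page-global directory whose user half is recopied (and shadowed for UDEREF) from the incoming mm before CR3 is pointed at it. A rough, self-contained sketch of that copy step; the type and function names below are stand-ins, not the patch's __clone_user_pgds()/__shadow_user_pgds():

typedef struct { unsigned long val; } pgd_entry_t;	/* stand-in type */

/* Refresh the user-space half of this CPU's private PGD from the
 * incoming mm's PGD before loading it into CR3. */
static void refresh_user_half(pgd_entry_t *cpu_pgd,
			      const pgd_entry_t *mm_pgd,
			      unsigned int user_entries)
{
	unsigned int i;

	for (i = 0; i < user_entries; i++)
		cpu_pgd[i] = mm_pgd[i];
}
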
8907 diff -urNp linux-2.6.32.42/arch/x86/include/asm/mmu.h linux-2.6.32.42/arch/x86/include/asm/mmu.h
8908 --- linux-2.6.32.42/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
8909 +++ linux-2.6.32.42/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
8910 @@ -9,10 +9,23 @@
8911 * we put the segment information here.
8912 */
8913 typedef struct {
8914 - void *ldt;
8915 + struct desc_struct *ldt;
8916 int size;
8917 struct mutex lock;
8918 - void *vdso;
8919 + unsigned long vdso;
8920 +
8921 +#ifdef CONFIG_X86_32
8922 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
8923 + unsigned long user_cs_base;
8924 + unsigned long user_cs_limit;
8925 +
8926 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
8927 + cpumask_t cpu_user_cs_mask;
8928 +#endif
8929 +
8930 +#endif
8931 +#endif
8932 +
8933 } mm_context_t;
8934
8935 #ifdef CONFIG_SMP
8936 diff -urNp linux-2.6.32.42/arch/x86/include/asm/module.h linux-2.6.32.42/arch/x86/include/asm/module.h
8937 --- linux-2.6.32.42/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
8938 +++ linux-2.6.32.42/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
8939 @@ -5,6 +5,7 @@
8940
8941 #ifdef CONFIG_X86_64
8942 /* X86_64 does not define MODULE_PROC_FAMILY */
8943 +#define MODULE_PROC_FAMILY ""
8944 #elif defined CONFIG_M386
8945 #define MODULE_PROC_FAMILY "386 "
8946 #elif defined CONFIG_M486
8947 @@ -59,13 +60,36 @@
8948 #error unknown processor family
8949 #endif
8950
8951 -#ifdef CONFIG_X86_32
8952 -# ifdef CONFIG_4KSTACKS
8953 -# define MODULE_STACKSIZE "4KSTACKS "
8954 -# else
8955 -# define MODULE_STACKSIZE ""
8956 -# endif
8957 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
8958 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8959 +#define MODULE_PAX_UDEREF "UDEREF "
8960 +#else
8961 +#define MODULE_PAX_UDEREF ""
8962 +#endif
8963 +
8964 +#ifdef CONFIG_PAX_KERNEXEC
8965 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
8966 +#else
8967 +#define MODULE_PAX_KERNEXEC ""
8968 +#endif
8969 +
8970 +#ifdef CONFIG_PAX_REFCOUNT
8971 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
8972 +#else
8973 +#define MODULE_PAX_REFCOUNT ""
8974 #endif
8975
8976 +#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
8977 +#define MODULE_STACKSIZE "4KSTACKS "
8978 +#else
8979 +#define MODULE_STACKSIZE ""
8980 +#endif
8981 +
8982 +#ifdef CONFIG_GRKERNSEC
8983 +#define MODULE_GRSEC "GRSECURITY "
8984 +#else
8985 +#define MODULE_GRSEC ""
8986 +#endif
8987 +
8988 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
8989 +
8990 #endif /* _ASM_X86_MODULE_H */
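
The reworked MODULE_ARCH_VERMAGIC above appends one token per enabled hardening feature, so a module built against a different feature mix fails the vermagic check at load time. For one assumed configuration (X86_64 with GRKERNSEC, KERNEXEC and UDEREF enabled, REFCOUNT and 4KSTACKS off) the tokens concatenate, in the order given in the hunk, as:

	"" "" "GRSECURITY " "KERNEXEC " "UDEREF " ""  ->  "GRSECURITY KERNEXEC UDEREF "
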
8991 diff -urNp linux-2.6.32.42/arch/x86/include/asm/page_64_types.h linux-2.6.32.42/arch/x86/include/asm/page_64_types.h
8992 --- linux-2.6.32.42/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
8993 +++ linux-2.6.32.42/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
8994 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
8995
8996 /* duplicated to the one in bootmem.h */
8997 extern unsigned long max_pfn;
8998 -extern unsigned long phys_base;
8999 +extern const unsigned long phys_base;
9000
9001 extern unsigned long __phys_addr(unsigned long);
9002 #define __phys_reloc_hide(x) (x)
9003 diff -urNp linux-2.6.32.42/arch/x86/include/asm/paravirt.h linux-2.6.32.42/arch/x86/include/asm/paravirt.h
9004 --- linux-2.6.32.42/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9005 +++ linux-2.6.32.42/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
9006 @@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
9007 pv_mmu_ops.set_fixmap(idx, phys, flags);
9008 }
9009
9010 +#ifdef CONFIG_PAX_KERNEXEC
9011 +static inline unsigned long pax_open_kernel(void)
9012 +{
9013 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9014 +}
9015 +
9016 +static inline unsigned long pax_close_kernel(void)
9017 +{
9018 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9019 +}
9020 +#else
9021 +static inline unsigned long pax_open_kernel(void) { return 0; }
9022 +static inline unsigned long pax_close_kernel(void) { return 0; }
9023 +#endif
9024 +
9025 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9026
9027 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9028 @@ -945,7 +960,7 @@ extern void default_banner(void);
9029
9030 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9031 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9032 -#define PARA_INDIRECT(addr) *%cs:addr
9033 +#define PARA_INDIRECT(addr) *%ss:addr
9034 #endif
9035
9036 #define INTERRUPT_RETURN \
9037 @@ -1022,6 +1037,21 @@ extern void default_banner(void);
9038 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9039 CLBR_NONE, \
9040 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9041 +
9042 +#define GET_CR0_INTO_RDI \
9043 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9044 + mov %rax,%rdi
9045 +
9046 +#define SET_RDI_INTO_CR0 \
9047 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9048 +
9049 +#define GET_CR3_INTO_RDI \
9050 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9051 + mov %rax,%rdi
9052 +
9053 +#define SET_RDI_INTO_CR3 \
9054 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9055 +
9056 #endif /* CONFIG_X86_32 */
9057
9058 #endif /* __ASSEMBLY__ */
9059 diff -urNp linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h
9060 --- linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9061 +++ linux-2.6.32.42/arch/x86/include/asm/paravirt_types.h 2011-04-17 15:56:46.000000000 -0400
9062 @@ -316,6 +316,12 @@ struct pv_mmu_ops {
9063 an mfn. We can tell which is which from the index. */
9064 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9065 phys_addr_t phys, pgprot_t flags);
9066 +
9067 +#ifdef CONFIG_PAX_KERNEXEC
9068 + unsigned long (*pax_open_kernel)(void);
9069 + unsigned long (*pax_close_kernel)(void);
9070 +#endif
9071 +
9072 };
9073
9074 struct raw_spinlock;
9075 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pci_x86.h linux-2.6.32.42/arch/x86/include/asm/pci_x86.h
9076 --- linux-2.6.32.42/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9077 +++ linux-2.6.32.42/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9078 @@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9079 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9080
9081 struct pci_raw_ops {
9082 - int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9083 + int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9084 int reg, int len, u32 *val);
9085 - int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9086 + int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9087 int reg, int len, u32 val);
9088 };
9089
9090 -extern struct pci_raw_ops *raw_pci_ops;
9091 -extern struct pci_raw_ops *raw_pci_ext_ops;
9092 +extern const struct pci_raw_ops *raw_pci_ops;
9093 +extern const struct pci_raw_ops *raw_pci_ext_ops;
9094
9095 -extern struct pci_raw_ops pci_direct_conf1;
9096 +extern const struct pci_raw_ops pci_direct_conf1;
9097 extern bool port_cf9_safe;
9098
9099 /* arch_initcall level */
9100 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgalloc.h linux-2.6.32.42/arch/x86/include/asm/pgalloc.h
9101 --- linux-2.6.32.42/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9102 +++ linux-2.6.32.42/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9103 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9104 pmd_t *pmd, pte_t *pte)
9105 {
9106 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9107 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9108 +}
9109 +
9110 +static inline void pmd_populate_user(struct mm_struct *mm,
9111 + pmd_t *pmd, pte_t *pte)
9112 +{
9113 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9114 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9115 }
9116
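
pmd_populate_kernel() above now installs the pte page with _KERNPG_TABLE, while the new pmd_populate_user() keeps the old _PAGE_TABLE behaviour. For orientation, the difference between those two flag sets in the 2.6.32-era headers is (quoted from memory, so treat as approximate):

	_PAGE_TABLE   = PRESENT | RW | USER | ACCESSED | DIRTY
	_KERNPG_TABLE = PRESENT | RW |        ACCESSED | DIRTY

i.e. the only difference is the USER bit on the pmd entry.
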
9117 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h
9118 --- linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9119 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9120 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9121
9122 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9123 {
9124 + pax_open_kernel();
9125 *pmdp = pmd;
9126 + pax_close_kernel();
9127 }
9128
9129 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9130 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h
9131 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9132 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9133 @@ -26,9 +26,6 @@
9134 struct mm_struct;
9135 struct vm_area_struct;
9136
9137 -extern pgd_t swapper_pg_dir[1024];
9138 -extern pgd_t trampoline_pg_dir[1024];
9139 -
9140 static inline void pgtable_cache_init(void) { }
9141 static inline void check_pgt_cache(void) { }
9142 void paging_init(void);
9143 @@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9144 # include <asm/pgtable-2level.h>
9145 #endif
9146
9147 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9148 +extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9149 +#ifdef CONFIG_X86_PAE
9150 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9151 +#endif
9152 +
9153 #if defined(CONFIG_HIGHPTE)
9154 #define __KM_PTE \
9155 (in_nmi() ? KM_NMI_PTE : \
9156 @@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9157 /* Clear a kernel PTE and flush it from the TLB */
9158 #define kpte_clear_flush(ptep, vaddr) \
9159 do { \
9160 + pax_open_kernel(); \
9161 pte_clear(&init_mm, (vaddr), (ptep)); \
9162 + pax_close_kernel(); \
9163 __flush_tlb_one((vaddr)); \
9164 } while (0)
9165
9166 @@ -85,6 +90,9 @@ do { \
9167
9168 #endif /* !__ASSEMBLY__ */
9169
9170 +#define HAVE_ARCH_UNMAPPED_AREA
9171 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9172 +
9173 /*
9174 * kern_addr_valid() is (1) for FLATMEM and (0) for
9175 * SPARSEMEM and DISCONTIGMEM
9176 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h
9177 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9178 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9179 @@ -8,7 +8,7 @@
9180 */
9181 #ifdef CONFIG_X86_PAE
9182 # include <asm/pgtable-3level_types.h>
9183 -# define PMD_SIZE (1UL << PMD_SHIFT)
9184 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9185 # define PMD_MASK (~(PMD_SIZE - 1))
9186 #else
9187 # include <asm/pgtable-2level_types.h>
9188 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9189 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9190 #endif
9191
9192 +#ifdef CONFIG_PAX_KERNEXEC
9193 +#ifndef __ASSEMBLY__
9194 +extern unsigned char MODULES_EXEC_VADDR[];
9195 +extern unsigned char MODULES_EXEC_END[];
9196 +#endif
9197 +#include <asm/boot.h>
9198 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9199 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9200 +#else
9201 +#define ktla_ktva(addr) (addr)
9202 +#define ktva_ktla(addr) (addr)
9203 +#endif
9204 +
9205 #define MODULES_VADDR VMALLOC_START
9206 #define MODULES_END VMALLOC_END
9207 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
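
Under 32-bit KERNEXEC, the ktla_ktva()/ktva_ktla() macros above translate kernel text addresses by a fixed offset of LOAD_PHYSICAL_ADDR + PAGE_OFFSET. A worked example with assumed, configuration-dependent values (PAGE_OFFSET = 0xC0000000, LOAD_PHYSICAL_ADDR = 0x01000000):

	ktla_ktva(0x00100000) = 0x00100000 + 0x01000000 + 0xC0000000 = 0xC1100000
	ktva_ktla(0xC1100000) = 0xC1100000 - 0x01000000 - 0xC0000000 = 0x00100000

so the two macros are exact inverses; the 64-bit variants in pgtable_64_types.h further down are defined as the identity.
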
9208 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h
9209 --- linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9210 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9211 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9212
9213 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9214 {
9215 + pax_open_kernel();
9216 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9217 + pax_close_kernel();
9218 }
9219
9220 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9221 {
9222 + pax_open_kernel();
9223 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9224 + pax_close_kernel();
9225 }
9226
9227 /*
9228 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h
9229 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9230 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9231 @@ -16,10 +16,13 @@
9232
9233 extern pud_t level3_kernel_pgt[512];
9234 extern pud_t level3_ident_pgt[512];
9235 +extern pud_t level3_vmalloc_pgt[512];
9236 +extern pud_t level3_vmemmap_pgt[512];
9237 +extern pud_t level2_vmemmap_pgt[512];
9238 extern pmd_t level2_kernel_pgt[512];
9239 extern pmd_t level2_fixmap_pgt[512];
9240 -extern pmd_t level2_ident_pgt[512];
9241 -extern pgd_t init_level4_pgt[];
9242 +extern pmd_t level2_ident_pgt[512*2];
9243 +extern pgd_t init_level4_pgt[512];
9244
9245 #define swapper_pg_dir init_level4_pgt
9246
9247 @@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9248
9249 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9250 {
9251 + pax_open_kernel();
9252 *pmdp = pmd;
9253 + pax_close_kernel();
9254 }
9255
9256 static inline void native_pmd_clear(pmd_t *pmd)
9257 @@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9258
9259 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9260 {
9261 + pax_open_kernel();
9262 *pgdp = pgd;
9263 + pax_close_kernel();
9264 }
9265
9266 static inline void native_pgd_clear(pgd_t *pgd)
9267 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h
9268 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9269 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9270 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9271 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9272 #define MODULES_END _AC(0xffffffffff000000, UL)
9273 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9274 +#define MODULES_EXEC_VADDR MODULES_VADDR
9275 +#define MODULES_EXEC_END MODULES_END
9276 +
9277 +#define ktla_ktva(addr) (addr)
9278 +#define ktva_ktla(addr) (addr)
9279
9280 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9281 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable.h linux-2.6.32.42/arch/x86/include/asm/pgtable.h
9282 --- linux-2.6.32.42/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9283 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9284 @@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9285
9286 #define arch_end_context_switch(prev) do {} while(0)
9287
9288 +#define pax_open_kernel() native_pax_open_kernel()
9289 +#define pax_close_kernel() native_pax_close_kernel()
9290 #endif /* CONFIG_PARAVIRT */
9291
9292 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
9293 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9294 +
9295 +#ifdef CONFIG_PAX_KERNEXEC
9296 +static inline unsigned long native_pax_open_kernel(void)
9297 +{
9298 + unsigned long cr0;
9299 +
9300 + preempt_disable();
9301 + barrier();
9302 + cr0 = read_cr0() ^ X86_CR0_WP;
9303 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
9304 + write_cr0(cr0);
9305 + return cr0 ^ X86_CR0_WP;
9306 +}
9307 +
9308 +static inline unsigned long native_pax_close_kernel(void)
9309 +{
9310 + unsigned long cr0;
9311 +
9312 + cr0 = read_cr0() ^ X86_CR0_WP;
9313 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9314 + write_cr0(cr0);
9315 + barrier();
9316 + preempt_enable_no_resched();
9317 + return cr0 ^ X86_CR0_WP;
9318 +}
9319 +#else
9320 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
9321 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
9322 +#endif
9323 +
9324 /*
9325 * The following only work if pte_present() is true.
9326 * Undefined behaviour if not..
9327 */
9328 +static inline int pte_user(pte_t pte)
9329 +{
9330 + return pte_val(pte) & _PAGE_USER;
9331 +}
9332 +
9333 static inline int pte_dirty(pte_t pte)
9334 {
9335 return pte_flags(pte) & _PAGE_DIRTY;
9336 @@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9337 return pte_clear_flags(pte, _PAGE_RW);
9338 }
9339
9340 +static inline pte_t pte_mkread(pte_t pte)
9341 +{
9342 + return __pte(pte_val(pte) | _PAGE_USER);
9343 +}
9344 +
9345 static inline pte_t pte_mkexec(pte_t pte)
9346 {
9347 - return pte_clear_flags(pte, _PAGE_NX);
9348 +#ifdef CONFIG_X86_PAE
9349 + if (__supported_pte_mask & _PAGE_NX)
9350 + return pte_clear_flags(pte, _PAGE_NX);
9351 + else
9352 +#endif
9353 + return pte_set_flags(pte, _PAGE_USER);
9354 +}
9355 +
9356 +static inline pte_t pte_exprotect(pte_t pte)
9357 +{
9358 +#ifdef CONFIG_X86_PAE
9359 + if (__supported_pte_mask & _PAGE_NX)
9360 + return pte_set_flags(pte, _PAGE_NX);
9361 + else
9362 +#endif
9363 + return pte_clear_flags(pte, _PAGE_USER);
9364 }
9365
9366 static inline pte_t pte_mkdirty(pte_t pte)
9367 @@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9368 #endif
9369
9370 #ifndef __ASSEMBLY__
9371 +
9372 +#ifdef CONFIG_PAX_PER_CPU_PGD
9373 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9374 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9375 +{
9376 + return cpu_pgd[cpu];
9377 +}
9378 +#endif
9379 +
9380 #include <linux/mm_types.h>
9381
9382 static inline int pte_none(pte_t pte)
9383 @@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9384
9385 static inline int pgd_bad(pgd_t pgd)
9386 {
9387 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9388 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9389 }
9390
9391 static inline int pgd_none(pgd_t pgd)
9392 @@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9393 * pgd_offset() returns a (pgd_t *)
9394 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9395 */
9396 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9397 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9398 +
9399 +#ifdef CONFIG_PAX_PER_CPU_PGD
9400 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9401 +#endif
9402 +
9403 /*
9404 * a shortcut which implies the use of the kernel's pgd, instead
9405 * of a process's
9406 @@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9407 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9408 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9409
9410 +#ifdef CONFIG_X86_32
9411 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9412 +#else
9413 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9414 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9415 +
9416 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9417 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9418 +#else
9419 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9420 +#endif
9421 +
9422 +#endif
9423 +
9424 #ifndef __ASSEMBLY__
9425
9426 extern int direct_gbpages;
9427 @@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9428 * dst and src can be on the same page, but the range must not overlap,
9429 * and must not cross a page boundary.
9430 */
9431 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9432 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9433 {
9434 - memcpy(dst, src, count * sizeof(pgd_t));
9435 + pax_open_kernel();
9436 + while (count--)
9437 + *dst++ = *src++;
9438 + pax_close_kernel();
9439 }
9440
9441 +#ifdef CONFIG_PAX_PER_CPU_PGD
9442 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9443 +#endif
9444 +
9445 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9446 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9447 +#else
9448 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9449 +#endif
9450
9451 #include <asm-generic/pgtable.h>
9452 #endif /* __ASSEMBLY__ */
9453 diff -urNp linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h
9454 --- linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9455 +++ linux-2.6.32.42/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9456 @@ -16,12 +16,11 @@
9457 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9458 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9459 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9460 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9461 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9462 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9463 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9464 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9465 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9466 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9467 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9468 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9469
9470 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9471 @@ -39,7 +38,6 @@
9472 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9473 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9474 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9475 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9476 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9477 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9478 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9479 @@ -55,8 +53,10 @@
9480
9481 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9482 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9483 -#else
9484 +#elif defined(CONFIG_KMEMCHECK)
9485 #define _PAGE_NX (_AT(pteval_t, 0))
9486 +#else
9487 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9488 #endif
9489
9490 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9491 @@ -93,6 +93,9 @@
9492 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9493 _PAGE_ACCESSED)
9494
9495 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
9496 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
9497 +
9498 #define __PAGE_KERNEL_EXEC \
9499 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9500 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9501 @@ -103,8 +106,8 @@
9502 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9503 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9504 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9505 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9506 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9507 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9508 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9509 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9510 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9511 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9512 @@ -163,8 +166,8 @@
9513 * bits are combined, this will alow user to access the high address mapped
9514 * VDSO in the presence of CONFIG_COMPAT_VDSO
9515 */
9516 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9517 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9518 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9519 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9520 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9521 #endif
9522
9523 @@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9524 {
9525 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9526 }
9527 +#endif
9528
9529 +#if PAGETABLE_LEVELS == 3
9530 +#include <asm-generic/pgtable-nopud.h>
9531 +#endif
9532 +
9533 +#if PAGETABLE_LEVELS == 2
9534 +#include <asm-generic/pgtable-nopmd.h>
9535 +#endif
9536 +
9537 +#ifndef __ASSEMBLY__
9538 #if PAGETABLE_LEVELS > 3
9539 typedef struct { pudval_t pud; } pud_t;
9540
9541 @@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9542 return pud.pud;
9543 }
9544 #else
9545 -#include <asm-generic/pgtable-nopud.h>
9546 -
9547 static inline pudval_t native_pud_val(pud_t pud)
9548 {
9549 return native_pgd_val(pud.pgd);
9550 @@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9551 return pmd.pmd;
9552 }
9553 #else
9554 -#include <asm-generic/pgtable-nopmd.h>
9555 -
9556 static inline pmdval_t native_pmd_val(pmd_t pmd)
9557 {
9558 return native_pgd_val(pmd.pud.pgd);
9559 @@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9560
9561 extern pteval_t __supported_pte_mask;
9562 extern void set_nx(void);
9563 +
9564 +#ifdef CONFIG_X86_32
9565 +#ifdef CONFIG_X86_PAE
9566 extern int nx_enabled;
9567 +#else
9568 +#define nx_enabled (0)
9569 +#endif
9570 +#else
9571 +#define nx_enabled (1)
9572 +#endif
9573
9574 #define pgprot_writecombine pgprot_writecombine
9575 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9576 diff -urNp linux-2.6.32.42/arch/x86/include/asm/processor.h linux-2.6.32.42/arch/x86/include/asm/processor.h
9577 --- linux-2.6.32.42/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9578 +++ linux-2.6.32.42/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9579 @@ -272,7 +272,7 @@ struct tss_struct {
9580
9581 } ____cacheline_aligned;
9582
9583 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9584 +extern struct tss_struct init_tss[NR_CPUS];
9585
9586 /*
9587 * Save the original ist values for checking stack pointers during debugging
9588 @@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9589 */
9590 #define TASK_SIZE PAGE_OFFSET
9591 #define TASK_SIZE_MAX TASK_SIZE
9592 +
9593 +#ifdef CONFIG_PAX_SEGMEXEC
9594 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9595 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9596 +#else
9597 #define STACK_TOP TASK_SIZE
9598 -#define STACK_TOP_MAX STACK_TOP
9599 +#endif
9600 +
9601 +#define STACK_TOP_MAX TASK_SIZE
9602
9603 #define INIT_THREAD { \
9604 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9605 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9606 .vm86_info = NULL, \
9607 .sysenter_cs = __KERNEL_CS, \
9608 .io_bitmap_ptr = NULL, \
9609 @@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
9610 */
9611 #define INIT_TSS { \
9612 .x86_tss = { \
9613 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
9614 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9615 .ss0 = __KERNEL_DS, \
9616 .ss1 = __KERNEL_CS, \
9617 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9618 @@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
9619 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9620
9621 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9622 -#define KSTK_TOP(info) \
9623 -({ \
9624 - unsigned long *__ptr = (unsigned long *)(info); \
9625 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9626 -})
9627 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9628
9629 /*
9630 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9631 @@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
9632 #define task_pt_regs(task) \
9633 ({ \
9634 struct pt_regs *__regs__; \
9635 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9636 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9637 __regs__ - 1; \
9638 })
9639
9640 @@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
9641 /*
9642 * User space process size. 47bits minus one guard page.
9643 */
9644 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9645 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9646
9647 /* This decides where the kernel will search for a free chunk of vm
9648 * space during mmap's.
9649 */
9650 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9651 - 0xc0000000 : 0xFFFFe000)
9652 + 0xc0000000 : 0xFFFFf000)
9653
9654 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9655 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9656 @@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
9657 #define STACK_TOP_MAX TASK_SIZE_MAX
9658
9659 #define INIT_THREAD { \
9660 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9661 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9662 }
9663
9664 #define INIT_TSS { \
9665 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9666 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9667 }
9668
9669 /*
9670 @@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
9671 */
9672 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9673
9674 +#ifdef CONFIG_PAX_SEGMEXEC
9675 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9676 +#endif
9677 +
9678 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9679
9680 /* Get/set a process' ability to use the timestamp counter instruction */
9681 diff -urNp linux-2.6.32.42/arch/x86/include/asm/ptrace.h linux-2.6.32.42/arch/x86/include/asm/ptrace.h
9682 --- linux-2.6.32.42/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
9683 +++ linux-2.6.32.42/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
9684 @@ -151,28 +151,29 @@ static inline unsigned long regs_return_
9685 }
9686
9687 /*
9688 - * user_mode_vm(regs) determines whether a register set came from user mode.
9689 + * user_mode(regs) determines whether a register set came from user mode.
9690 * This is true if V8086 mode was enabled OR if the register set was from
9691 * protected mode with RPL-3 CS value. This tricky test checks that with
9692 * one comparison. Many places in the kernel can bypass this full check
9693 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9694 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9695 + * be used.
9696 */
9697 -static inline int user_mode(struct pt_regs *regs)
9698 +static inline int user_mode_novm(struct pt_regs *regs)
9699 {
9700 #ifdef CONFIG_X86_32
9701 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9702 #else
9703 - return !!(regs->cs & 3);
9704 + return !!(regs->cs & SEGMENT_RPL_MASK);
9705 #endif
9706 }
9707
9708 -static inline int user_mode_vm(struct pt_regs *regs)
9709 +static inline int user_mode(struct pt_regs *regs)
9710 {
9711 #ifdef CONFIG_X86_32
9712 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9713 USER_RPL;
9714 #else
9715 - return user_mode(regs);
9716 + return user_mode_novm(regs);
9717 #endif
9718 }
9719
9720 diff -urNp linux-2.6.32.42/arch/x86/include/asm/reboot.h linux-2.6.32.42/arch/x86/include/asm/reboot.h
9721 --- linux-2.6.32.42/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
9722 +++ linux-2.6.32.42/arch/x86/include/asm/reboot.h 2011-05-22 23:02:03.000000000 -0400
9723 @@ -6,19 +6,19 @@
9724 struct pt_regs;
9725
9726 struct machine_ops {
9727 - void (*restart)(char *cmd);
9728 - void (*halt)(void);
9729 - void (*power_off)(void);
9730 + void (* __noreturn restart)(char *cmd);
9731 + void (* __noreturn halt)(void);
9732 + void (* __noreturn power_off)(void);
9733 void (*shutdown)(void);
9734 void (*crash_shutdown)(struct pt_regs *);
9735 - void (*emergency_restart)(void);
9736 + void (* __noreturn emergency_restart)(void);
9737 };
9738
9739 extern struct machine_ops machine_ops;
9740
9741 void native_machine_crash_shutdown(struct pt_regs *regs);
9742 void native_machine_shutdown(void);
9743 -void machine_real_restart(const unsigned char *code, int length);
9744 +void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
9745
9746 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
9747 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
9748 diff -urNp linux-2.6.32.42/arch/x86/include/asm/rwsem.h linux-2.6.32.42/arch/x86/include/asm/rwsem.h
9749 --- linux-2.6.32.42/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
9750 +++ linux-2.6.32.42/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
9751 @@ -118,6 +118,14 @@ static inline void __down_read(struct rw
9752 {
9753 asm volatile("# beginning down_read\n\t"
9754 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9755 +
9756 +#ifdef CONFIG_PAX_REFCOUNT
9757 + "jno 0f\n"
9758 + LOCK_PREFIX _ASM_DEC "(%1)\n\t"
9759 + "int $4\n0:\n"
9760 + _ASM_EXTABLE(0b, 0b)
9761 +#endif
9762 +
9763 /* adds 0x00000001, returns the old value */
9764 " jns 1f\n"
9765 " call call_rwsem_down_read_failed\n"
9766 @@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
9767 "1:\n\t"
9768 " mov %1,%2\n\t"
9769 " add %3,%2\n\t"
9770 +
9771 +#ifdef CONFIG_PAX_REFCOUNT
9772 + "jno 0f\n"
9773 + "sub %3,%2\n"
9774 + "int $4\n0:\n"
9775 + _ASM_EXTABLE(0b, 0b)
9776 +#endif
9777 +
9778 " jle 2f\n\t"
9779 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9780 " jnz 1b\n\t"
9781 @@ -160,6 +176,14 @@ static inline void __down_write_nested(s
9782 tmp = RWSEM_ACTIVE_WRITE_BIAS;
9783 asm volatile("# beginning down_write\n\t"
9784 LOCK_PREFIX " xadd %1,(%2)\n\t"
9785 +
9786 +#ifdef CONFIG_PAX_REFCOUNT
9787 + "jno 0f\n"
9788 + "mov %1,(%2)\n"
9789 + "int $4\n0:\n"
9790 + _ASM_EXTABLE(0b, 0b)
9791 +#endif
9792 +
9793 /* subtract 0x0000ffff, returns the old value */
9794 " test %1,%1\n\t"
9795 /* was the count 0 before? */
9796 @@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
9797 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
9798 asm volatile("# beginning __up_read\n\t"
9799 LOCK_PREFIX " xadd %1,(%2)\n\t"
9800 +
9801 +#ifdef CONFIG_PAX_REFCOUNT
9802 + "jno 0f\n"
9803 + "mov %1,(%2)\n"
9804 + "int $4\n0:\n"
9805 + _ASM_EXTABLE(0b, 0b)
9806 +#endif
9807 +
9808 /* subtracts 1, returns the old value */
9809 " jns 1f\n\t"
9810 " call call_rwsem_wake\n"
9811 @@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
9812 rwsem_count_t tmp;
9813 asm volatile("# beginning __up_write\n\t"
9814 LOCK_PREFIX " xadd %1,(%2)\n\t"
9815 +
9816 +#ifdef CONFIG_PAX_REFCOUNT
9817 + "jno 0f\n"
9818 + "mov %1,(%2)\n"
9819 + "int $4\n0:\n"
9820 + _ASM_EXTABLE(0b, 0b)
9821 +#endif
9822 +
9823 /* tries to transition
9824 0xffff0001 -> 0x00000000 */
9825 " jz 1f\n"
9826 @@ -234,6 +274,14 @@ static inline void __downgrade_write(str
9827 {
9828 asm volatile("# beginning __downgrade_write\n\t"
9829 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
9830 +
9831 +#ifdef CONFIG_PAX_REFCOUNT
9832 + "jno 0f\n"
9833 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
9834 + "int $4\n0:\n"
9835 + _ASM_EXTABLE(0b, 0b)
9836 +#endif
9837 +
9838 /*
9839 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
9840 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
9841 @@ -253,7 +301,15 @@ static inline void __downgrade_write(str
9842 static inline void rwsem_atomic_add(rwsem_count_t delta,
9843 struct rw_semaphore *sem)
9844 {
9845 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
9846 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
9847 +
9848 +#ifdef CONFIG_PAX_REFCOUNT
9849 + "jno 0f\n"
9850 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
9851 + "int $4\n0:\n"
9852 + _ASM_EXTABLE(0b, 0b)
9853 +#endif
9854 +
9855 : "+m" (sem->count)
9856 : "er" (delta));
9857 }
9858 @@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
9859 {
9860 rwsem_count_t tmp = delta;
9861
9862 - asm volatile(LOCK_PREFIX "xadd %0,%1"
9863 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
9864 +
9865 +#ifdef CONFIG_PAX_REFCOUNT
9866 + "jno 0f\n"
9867 + "mov %0,%1\n"
9868 + "int $4\n0:\n"
9869 + _ASM_EXTABLE(0b, 0b)
9870 +#endif
9871 +
9872 : "+r" (tmp), "+m" (sem->count)
9873 : : "memory");
9874
9875 diff -urNp linux-2.6.32.42/arch/x86/include/asm/segment.h linux-2.6.32.42/arch/x86/include/asm/segment.h
9876 --- linux-2.6.32.42/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
9877 +++ linux-2.6.32.42/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
9878 @@ -62,8 +62,8 @@
9879 * 26 - ESPFIX small SS
9880 * 27 - per-cpu [ offset to per-cpu data area ]
9881 * 28 - stack_canary-20 [ for stack protector ]
9882 - * 29 - unused
9883 - * 30 - unused
9884 + * 29 - PCI BIOS CS
9885 + * 30 - PCI BIOS DS
9886 * 31 - TSS for double fault handler
9887 */
9888 #define GDT_ENTRY_TLS_MIN 6
9889 @@ -77,6 +77,8 @@
9890
9891 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
9892
9893 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
9894 +
9895 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
9896
9897 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
9898 @@ -88,7 +90,7 @@
9899 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
9900 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
9901
9902 -#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
9903 +#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
9904 #ifdef CONFIG_SMP
9905 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
9906 #else
9907 @@ -102,6 +104,12 @@
9908 #define __KERNEL_STACK_CANARY 0
9909 #endif
9910
9911 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
9912 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
9913 +
9914 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
9915 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
9916 +
9917 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
9918
9919 /*
9920 @@ -139,7 +147,7 @@
9921 */
9922
9923 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
9924 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
9925 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
9926
9927
9928 #else
9929 @@ -163,6 +171,8 @@
9930 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
9931 #define __USER32_DS __USER_DS
9932
9933 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
9934 +
9935 #define GDT_ENTRY_TSS 8 /* needs two entries */
9936 #define GDT_ENTRY_LDT 10 /* needs two entries */
9937 #define GDT_ENTRY_TLS_MIN 12
9938 @@ -183,6 +193,7 @@
9939 #endif
9940
9941 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
9942 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
9943 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
9944 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
9945 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
9946 diff -urNp linux-2.6.32.42/arch/x86/include/asm/smp.h linux-2.6.32.42/arch/x86/include/asm/smp.h
9947 --- linux-2.6.32.42/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
9948 +++ linux-2.6.32.42/arch/x86/include/asm/smp.h 2011-04-17 15:56:46.000000000 -0400
9949 @@ -24,7 +24,7 @@ extern unsigned int num_processors;
9950 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
9951 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
9952 DECLARE_PER_CPU(u16, cpu_llc_id);
9953 -DECLARE_PER_CPU(int, cpu_number);
9954 +DECLARE_PER_CPU(unsigned int, cpu_number);
9955
9956 static inline struct cpumask *cpu_sibling_mask(int cpu)
9957 {
9958 @@ -175,14 +175,8 @@ extern unsigned disabled_cpus __cpuinitd
9959 extern int safe_smp_processor_id(void);
9960
9961 #elif defined(CONFIG_X86_64_SMP)
9962 -#define raw_smp_processor_id() (percpu_read(cpu_number))
9963 -
9964 -#define stack_smp_processor_id() \
9965 -({ \
9966 - struct thread_info *ti; \
9967 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
9968 - ti->cpu; \
9969 -})
9970 +#define raw_smp_processor_id() (percpu_read(cpu_number))
9971 +#define stack_smp_processor_id() raw_smp_processor_id()
9972 #define safe_smp_processor_id() smp_processor_id()
9973
9974 #endif
9975 diff -urNp linux-2.6.32.42/arch/x86/include/asm/spinlock.h linux-2.6.32.42/arch/x86/include/asm/spinlock.h
9976 --- linux-2.6.32.42/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
9977 +++ linux-2.6.32.42/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
9978 @@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
9979 static inline void __raw_read_lock(raw_rwlock_t *rw)
9980 {
9981 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
9982 +
9983 +#ifdef CONFIG_PAX_REFCOUNT
9984 + "jno 0f\n"
9985 + LOCK_PREFIX " addl $1,(%0)\n"
9986 + "int $4\n0:\n"
9987 + _ASM_EXTABLE(0b, 0b)
9988 +#endif
9989 +
9990 "jns 1f\n"
9991 "call __read_lock_failed\n\t"
9992 "1:\n"
9993 @@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
9994 static inline void __raw_write_lock(raw_rwlock_t *rw)
9995 {
9996 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
9997 +
9998 +#ifdef CONFIG_PAX_REFCOUNT
9999 + "jno 0f\n"
10000 + LOCK_PREFIX " addl %1,(%0)\n"
10001 + "int $4\n0:\n"
10002 + _ASM_EXTABLE(0b, 0b)
10003 +#endif
10004 +
10005 "jz 1f\n"
10006 "call __write_lock_failed\n\t"
10007 "1:\n"
10008 @@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10009
10010 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10011 {
10012 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10013 + asm volatile(LOCK_PREFIX "incl %0\n"
10014 +
10015 +#ifdef CONFIG_PAX_REFCOUNT
10016 + "jno 0f\n"
10017 + LOCK_PREFIX "decl %0\n"
10018 + "int $4\n0:\n"
10019 + _ASM_EXTABLE(0b, 0b)
10020 +#endif
10021 +
10022 + :"+m" (rw->lock) : : "memory");
10023 }
10024
10025 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10026 {
10027 - asm volatile(LOCK_PREFIX "addl %1, %0"
10028 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
10029 +
10030 +#ifdef CONFIG_PAX_REFCOUNT
10031 + "jno 0f\n"
10032 + LOCK_PREFIX "subl %1, %0\n"
10033 + "int $4\n0:\n"
10034 + _ASM_EXTABLE(0b, 0b)
10035 +#endif
10036 +
10037 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10038 }
10039
10040 diff -urNp linux-2.6.32.42/arch/x86/include/asm/stackprotector.h linux-2.6.32.42/arch/x86/include/asm/stackprotector.h
10041 --- linux-2.6.32.42/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10042 +++ linux-2.6.32.42/arch/x86/include/asm/stackprotector.h 2011-04-17 15:56:46.000000000 -0400
10043 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10044
10045 static inline void load_stack_canary_segment(void)
10046 {
10047 -#ifdef CONFIG_X86_32
10048 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10049 asm volatile ("mov %0, %%gs" : : "r" (0));
10050 #endif
10051 }
10052 diff -urNp linux-2.6.32.42/arch/x86/include/asm/system.h linux-2.6.32.42/arch/x86/include/asm/system.h
10053 --- linux-2.6.32.42/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10054 +++ linux-2.6.32.42/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10055 @@ -132,7 +132,7 @@ do { \
10056 "thread_return:\n\t" \
10057 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10058 __switch_canary \
10059 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
10060 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10061 "movq %%rax,%%rdi\n\t" \
10062 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10063 "jnz ret_from_fork\n\t" \
10064 @@ -143,7 +143,7 @@ do { \
10065 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10066 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10067 [_tif_fork] "i" (_TIF_FORK), \
10068 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
10069 + [thread_info] "m" (per_cpu_var(current_tinfo)), \
10070 [current_task] "m" (per_cpu_var(current_task)) \
10071 __switch_canary_iparam \
10072 : "memory", "cc" __EXTRA_CLOBBER)
10073 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10074 {
10075 unsigned long __limit;
10076 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10077 - return __limit + 1;
10078 + return __limit;
10079 }
10080
10081 static inline void native_clts(void)
10082 @@ -340,12 +340,12 @@ void enable_hlt(void);
10083
10084 void cpu_idle_wait(void);
10085
10086 -extern unsigned long arch_align_stack(unsigned long sp);
10087 +#define arch_align_stack(x) ((x) & ~0xfUL)
10088 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10089
10090 void default_idle(void);
10091
10092 -void stop_this_cpu(void *dummy);
10093 +void stop_this_cpu(void *dummy) __noreturn;
10094
10095 /*
10096 * Force strict CPU ordering.
10097 diff -urNp linux-2.6.32.42/arch/x86/include/asm/thread_info.h linux-2.6.32.42/arch/x86/include/asm/thread_info.h
10098 --- linux-2.6.32.42/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10099 +++ linux-2.6.32.42/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10100 @@ -10,6 +10,7 @@
10101 #include <linux/compiler.h>
10102 #include <asm/page.h>
10103 #include <asm/types.h>
10104 +#include <asm/percpu.h>
10105
10106 /*
10107 * low level task data that entry.S needs immediate access to
10108 @@ -24,7 +25,6 @@ struct exec_domain;
10109 #include <asm/atomic.h>
10110
10111 struct thread_info {
10112 - struct task_struct *task; /* main task structure */
10113 struct exec_domain *exec_domain; /* execution domain */
10114 __u32 flags; /* low level flags */
10115 __u32 status; /* thread synchronous flags */
10116 @@ -34,18 +34,12 @@ struct thread_info {
10117 mm_segment_t addr_limit;
10118 struct restart_block restart_block;
10119 void __user *sysenter_return;
10120 -#ifdef CONFIG_X86_32
10121 - unsigned long previous_esp; /* ESP of the previous stack in
10122 - case of nested (IRQ) stacks
10123 - */
10124 - __u8 supervisor_stack[0];
10125 -#endif
10126 + unsigned long lowest_stack;
10127 int uaccess_err;
10128 };
10129
10130 -#define INIT_THREAD_INFO(tsk) \
10131 +#define INIT_THREAD_INFO \
10132 { \
10133 - .task = &tsk, \
10134 .exec_domain = &default_exec_domain, \
10135 .flags = 0, \
10136 .cpu = 0, \
10137 @@ -56,7 +50,7 @@ struct thread_info {
10138 }, \
10139 }
10140
10141 -#define init_thread_info (init_thread_union.thread_info)
10142 +#define init_thread_info (init_thread_union.stack)
10143 #define init_stack (init_thread_union.stack)
10144
10145 #else /* !__ASSEMBLY__ */
10146 @@ -163,6 +157,23 @@ struct thread_info {
10147 #define alloc_thread_info(tsk) \
10148 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10149
10150 +#ifdef __ASSEMBLY__
10151 +/* how to get the thread information struct from ASM */
10152 +#define GET_THREAD_INFO(reg) \
10153 + mov PER_CPU_VAR(current_tinfo), reg
10154 +
10155 +/* use this one if reg already contains %esp */
10156 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10157 +#else
10158 +/* how to get the thread information struct from C */
10159 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10160 +
10161 +static __always_inline struct thread_info *current_thread_info(void)
10162 +{
10163 + return percpu_read_stable(current_tinfo);
10164 +}
10165 +#endif
10166 +
10167 #ifdef CONFIG_X86_32
10168
10169 #define STACK_WARN (THREAD_SIZE/8)
10170 @@ -173,35 +184,13 @@ struct thread_info {
10171 */
10172 #ifndef __ASSEMBLY__
10173
10174 -
10175 /* how to get the current stack pointer from C */
10176 register unsigned long current_stack_pointer asm("esp") __used;
10177
10178 -/* how to get the thread information struct from C */
10179 -static inline struct thread_info *current_thread_info(void)
10180 -{
10181 - return (struct thread_info *)
10182 - (current_stack_pointer & ~(THREAD_SIZE - 1));
10183 -}
10184 -
10185 -#else /* !__ASSEMBLY__ */
10186 -
10187 -/* how to get the thread information struct from ASM */
10188 -#define GET_THREAD_INFO(reg) \
10189 - movl $-THREAD_SIZE, reg; \
10190 - andl %esp, reg
10191 -
10192 -/* use this one if reg already contains %esp */
10193 -#define GET_THREAD_INFO_WITH_ESP(reg) \
10194 - andl $-THREAD_SIZE, reg
10195 -
10196 #endif
10197
10198 #else /* X86_32 */
10199
10200 -#include <asm/percpu.h>
10201 -#define KERNEL_STACK_OFFSET (5*8)
10202 -
10203 /*
10204 * macros/functions for gaining access to the thread information structure
10205 * preempt_count needs to be 1 initially, until the scheduler is functional.
10206 @@ -209,21 +198,8 @@ static inline struct thread_info *curren
10207 #ifndef __ASSEMBLY__
10208 DECLARE_PER_CPU(unsigned long, kernel_stack);
10209
10210 -static inline struct thread_info *current_thread_info(void)
10211 -{
10212 - struct thread_info *ti;
10213 - ti = (void *)(percpu_read_stable(kernel_stack) +
10214 - KERNEL_STACK_OFFSET - THREAD_SIZE);
10215 - return ti;
10216 -}
10217 -
10218 -#else /* !__ASSEMBLY__ */
10219 -
10220 -/* how to get the thread information struct from ASM */
10221 -#define GET_THREAD_INFO(reg) \
10222 - movq PER_CPU_VAR(kernel_stack),reg ; \
10223 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10224 -
10225 +/* how to get the current stack pointer from C */
10226 +register unsigned long current_stack_pointer asm("rsp") __used;
10227 #endif
10228
10229 #endif /* !X86_32 */
10230 @@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10231 extern void free_thread_info(struct thread_info *ti);
10232 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10233 #define arch_task_cache_init arch_task_cache_init
10234 +
10235 +#define __HAVE_THREAD_FUNCTIONS
10236 +#define task_thread_info(task) (&(task)->tinfo)
10237 +#define task_stack_page(task) ((task)->stack)
10238 +#define setup_thread_stack(p, org) do {} while (0)
10239 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10240 +
10241 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10242 +extern struct task_struct *alloc_task_struct(void);
10243 +extern void free_task_struct(struct task_struct *);
10244 +
10245 #endif
10246 #endif /* _ASM_X86_THREAD_INFO_H */
10247 diff -urNp linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h
10248 --- linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10249 +++ linux-2.6.32.42/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10250 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10251 static __always_inline unsigned long __must_check
10252 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10253 {
10254 + pax_track_stack();
10255 +
10256 + if ((long)n < 0)
10257 + return n;
10258 +
10259 if (__builtin_constant_p(n)) {
10260 unsigned long ret;
10261
10262 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10263 return ret;
10264 }
10265 }
10266 + if (!__builtin_constant_p(n))
10267 + check_object_size(from, n, true);
10268 return __copy_to_user_ll(to, from, n);
10269 }
10270
10271 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10272 __copy_to_user(void __user *to, const void *from, unsigned long n)
10273 {
10274 might_fault();
10275 +
10276 return __copy_to_user_inatomic(to, from, n);
10277 }
10278
10279 static __always_inline unsigned long
10280 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10281 {
10282 + if ((long)n < 0)
10283 + return n;
10284 +
10285 /* Avoid zeroing the tail if the copy fails..
10286 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10287 * but as the zeroing behaviour is only significant when n is not
10288 @@ -138,6 +149,12 @@ static __always_inline unsigned long
10289 __copy_from_user(void *to, const void __user *from, unsigned long n)
10290 {
10291 might_fault();
10292 +
10293 + pax_track_stack();
10294 +
10295 + if ((long)n < 0)
10296 + return n;
10297 +
10298 if (__builtin_constant_p(n)) {
10299 unsigned long ret;
10300
10301 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10302 return ret;
10303 }
10304 }
10305 + if (!__builtin_constant_p(n))
10306 + check_object_size(to, n, false);
10307 return __copy_from_user_ll(to, from, n);
10308 }
10309
10310 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10311 const void __user *from, unsigned long n)
10312 {
10313 might_fault();
10314 +
10315 + if ((long)n < 0)
10316 + return n;
10317 +
10318 if (__builtin_constant_p(n)) {
10319 unsigned long ret;
10320
10321 @@ -182,14 +205,62 @@ static __always_inline unsigned long
10322 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10323 unsigned long n)
10324 {
10325 - return __copy_from_user_ll_nocache_nozero(to, from, n);
10326 + if ((long)n < 0)
10327 + return n;
10328 +
10329 + return __copy_from_user_ll_nocache_nozero(to, from, n);
10330 +}
10331 +
10332 +/**
10333 + * copy_to_user: - Copy a block of data into user space.
10334 + * @to: Destination address, in user space.
10335 + * @from: Source address, in kernel space.
10336 + * @n: Number of bytes to copy.
10337 + *
10338 + * Context: User context only. This function may sleep.
10339 + *
10340 + * Copy data from kernel space to user space.
10341 + *
10342 + * Returns number of bytes that could not be copied.
10343 + * On success, this will be zero.
10344 + */
10345 +static __always_inline unsigned long __must_check
10346 +copy_to_user(void __user *to, const void *from, unsigned long n)
10347 +{
10348 + if (access_ok(VERIFY_WRITE, to, n))
10349 + n = __copy_to_user(to, from, n);
10350 + return n;
10351 +}
10352 +
10353 +/**
10354 + * copy_from_user: - Copy a block of data from user space.
10355 + * @to: Destination address, in kernel space.
10356 + * @from: Source address, in user space.
10357 + * @n: Number of bytes to copy.
10358 + *
10359 + * Context: User context only. This function may sleep.
10360 + *
10361 + * Copy data from user space to kernel space.
10362 + *
10363 + * Returns number of bytes that could not be copied.
10364 + * On success, this will be zero.
10365 + *
10366 + * If some data could not be copied, this function will pad the copied
10367 + * data to the requested size using zero bytes.
10368 + */
10369 +static __always_inline unsigned long __must_check
10370 +copy_from_user(void *to, const void __user *from, unsigned long n)
10371 +{
10372 + if (access_ok(VERIFY_READ, from, n))
10373 + n = __copy_from_user(to, from, n);
10374 + else if ((long)n > 0) {
10375 + if (!__builtin_constant_p(n))
10376 + check_object_size(to, n, false);
10377 + memset(to, 0, n);
10378 + }
10379 + return n;
10380 }
10381
10382 -unsigned long __must_check copy_to_user(void __user *to,
10383 - const void *from, unsigned long n);
10384 -unsigned long __must_check copy_from_user(void *to,
10385 - const void __user *from,
10386 - unsigned long n);
10387 long __must_check strncpy_from_user(char *dst, const char __user *src,
10388 long count);
10389 long __must_check __strncpy_from_user(char *dst,
10390 diff -urNp linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h
10391 --- linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10392 +++ linux-2.6.32.42/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10393 @@ -9,6 +9,9 @@
10394 #include <linux/prefetch.h>
10395 #include <linux/lockdep.h>
10396 #include <asm/page.h>
10397 +#include <asm/pgtable.h>
10398 +
10399 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
10400
10401 /*
10402 * Copy To/From Userspace
10403 @@ -19,113 +22,203 @@ __must_check unsigned long
10404 copy_user_generic(void *to, const void *from, unsigned len);
10405
10406 __must_check unsigned long
10407 -copy_to_user(void __user *to, const void *from, unsigned len);
10408 -__must_check unsigned long
10409 -copy_from_user(void *to, const void __user *from, unsigned len);
10410 -__must_check unsigned long
10411 copy_in_user(void __user *to, const void __user *from, unsigned len);
10412
10413 static __always_inline __must_check
10414 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
10415 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10416 {
10417 - int ret = 0;
10418 + unsigned ret = 0;
10419
10420 might_fault();
10421 - if (!__builtin_constant_p(size))
10422 - return copy_user_generic(dst, (__force void *)src, size);
10423 +
10424 + if ((int)size < 0)
10425 + return size;
10426 +
10427 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10428 + if (!__access_ok(VERIFY_READ, src, size))
10429 + return size;
10430 +#endif
10431 +
10432 + if (!__builtin_constant_p(size)) {
10433 + check_object_size(dst, size, false);
10434 +
10435 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10436 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10437 + src += PAX_USER_SHADOW_BASE;
10438 +#endif
10439 +
10440 + return copy_user_generic(dst, (__force const void *)src, size);
10441 + }
10442 switch (size) {
10443 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10444 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10445 ret, "b", "b", "=q", 1);
10446 return ret;
10447 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10448 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10449 ret, "w", "w", "=r", 2);
10450 return ret;
10451 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10452 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10453 ret, "l", "k", "=r", 4);
10454 return ret;
10455 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10456 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10457 ret, "q", "", "=r", 8);
10458 return ret;
10459 case 10:
10460 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10461 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10462 ret, "q", "", "=r", 10);
10463 if (unlikely(ret))
10464 return ret;
10465 __get_user_asm(*(u16 *)(8 + (char *)dst),
10466 - (u16 __user *)(8 + (char __user *)src),
10467 + (const u16 __user *)(8 + (const char __user *)src),
10468 ret, "w", "w", "=r", 2);
10469 return ret;
10470 case 16:
10471 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10472 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10473 ret, "q", "", "=r", 16);
10474 if (unlikely(ret))
10475 return ret;
10476 __get_user_asm(*(u64 *)(8 + (char *)dst),
10477 - (u64 __user *)(8 + (char __user *)src),
10478 + (const u64 __user *)(8 + (const char __user *)src),
10479 ret, "q", "", "=r", 8);
10480 return ret;
10481 default:
10482 - return copy_user_generic(dst, (__force void *)src, size);
10483 +
10484 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10485 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10486 + src += PAX_USER_SHADOW_BASE;
10487 +#endif
10488 +
10489 + return copy_user_generic(dst, (__force const void *)src, size);
10490 }
10491 }
10492
10493 static __always_inline __must_check
10494 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
10495 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10496 {
10497 - int ret = 0;
10498 + unsigned ret = 0;
10499
10500 might_fault();
10501 - if (!__builtin_constant_p(size))
10502 +
10503 + pax_track_stack();
10504 +
10505 + if ((int)size < 0)
10506 + return size;
10507 +
10508 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10509 + if (!__access_ok(VERIFY_WRITE, dst, size))
10510 + return size;
10511 +#endif
10512 +
10513 + if (!__builtin_constant_p(size)) {
10514 + check_object_size(src, size, true);
10515 +
10516 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10517 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10518 + dst += PAX_USER_SHADOW_BASE;
10519 +#endif
10520 +
10521 return copy_user_generic((__force void *)dst, src, size);
10522 + }
10523 switch (size) {
10524 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10525 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10526 ret, "b", "b", "iq", 1);
10527 return ret;
10528 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10529 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10530 ret, "w", "w", "ir", 2);
10531 return ret;
10532 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10533 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10534 ret, "l", "k", "ir", 4);
10535 return ret;
10536 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10537 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10538 ret, "q", "", "er", 8);
10539 return ret;
10540 case 10:
10541 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10542 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10543 ret, "q", "", "er", 10);
10544 if (unlikely(ret))
10545 return ret;
10546 asm("":::"memory");
10547 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10548 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10549 ret, "w", "w", "ir", 2);
10550 return ret;
10551 case 16:
10552 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10553 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10554 ret, "q", "", "er", 16);
10555 if (unlikely(ret))
10556 return ret;
10557 asm("":::"memory");
10558 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10559 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10560 ret, "q", "", "er", 8);
10561 return ret;
10562 default:
10563 +
10564 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10565 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10566 + dst += PAX_USER_SHADOW_BASE;
10567 +#endif
10568 +
10569 return copy_user_generic((__force void *)dst, src, size);
10570 }
10571 }
10572
10573 static __always_inline __must_check
10574 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10575 +unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
10576 +{
10577 + if (access_ok(VERIFY_WRITE, to, len))
10578 + len = __copy_to_user(to, from, len);
10579 + return len;
10580 +}
10581 +
10582 +static __always_inline __must_check
10583 +unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
10584 +{
10585 + if ((int)len < 0)
10586 + return len;
10587 +
10588 + if (access_ok(VERIFY_READ, from, len))
10589 + len = __copy_from_user(to, from, len);
10590 + else if ((int)len > 0) {
10591 + if (!__builtin_constant_p(len))
10592 + check_object_size(to, len, false);
10593 + memset(to, 0, len);
10594 + }
10595 + return len;
10596 +}
10597 +
10598 +static __always_inline __must_check
10599 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10600 {
10601 - int ret = 0;
10602 + unsigned ret = 0;
10603
10604 might_fault();
10605 - if (!__builtin_constant_p(size))
10606 +
10607 + pax_track_stack();
10608 +
10609 + if ((int)size < 0)
10610 + return size;
10611 +
10612 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10613 + if (!__access_ok(VERIFY_READ, src, size))
10614 + return size;
10615 + if (!__access_ok(VERIFY_WRITE, dst, size))
10616 + return size;
10617 +#endif
10618 +
10619 + if (!__builtin_constant_p(size)) {
10620 +
10621 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10622 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10623 + src += PAX_USER_SHADOW_BASE;
10624 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10625 + dst += PAX_USER_SHADOW_BASE;
10626 +#endif
10627 +
10628 return copy_user_generic((__force void *)dst,
10629 - (__force void *)src, size);
10630 + (__force const void *)src, size);
10631 + }
10632 switch (size) {
10633 case 1: {
10634 u8 tmp;
10635 - __get_user_asm(tmp, (u8 __user *)src,
10636 + __get_user_asm(tmp, (const u8 __user *)src,
10637 ret, "b", "b", "=q", 1);
10638 if (likely(!ret))
10639 __put_user_asm(tmp, (u8 __user *)dst,
10640 @@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
10641 }
10642 case 2: {
10643 u16 tmp;
10644 - __get_user_asm(tmp, (u16 __user *)src,
10645 + __get_user_asm(tmp, (const u16 __user *)src,
10646 ret, "w", "w", "=r", 2);
10647 if (likely(!ret))
10648 __put_user_asm(tmp, (u16 __user *)dst,
10649 @@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
10650
10651 case 4: {
10652 u32 tmp;
10653 - __get_user_asm(tmp, (u32 __user *)src,
10654 + __get_user_asm(tmp, (const u32 __user *)src,
10655 ret, "l", "k", "=r", 4);
10656 if (likely(!ret))
10657 __put_user_asm(tmp, (u32 __user *)dst,
10658 @@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
10659 }
10660 case 8: {
10661 u64 tmp;
10662 - __get_user_asm(tmp, (u64 __user *)src,
10663 + __get_user_asm(tmp, (const u64 __user *)src,
10664 ret, "q", "", "=r", 8);
10665 if (likely(!ret))
10666 __put_user_asm(tmp, (u64 __user *)dst,
10667 @@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
10668 return ret;
10669 }
10670 default:
10671 +
10672 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10673 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10674 + src += PAX_USER_SHADOW_BASE;
10675 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10676 + dst += PAX_USER_SHADOW_BASE;
10677 +#endif
10678 +
10679 return copy_user_generic((__force void *)dst,
10680 - (__force void *)src, size);
10681 + (__force const void *)src, size);
10682 }
10683 }
10684
10685 @@ -176,33 +277,75 @@ __must_check long strlen_user(const char
10686 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
10687 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
10688
10689 -__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
10690 - unsigned size);
10691 +static __must_check __always_inline unsigned long
10692 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
10693 +{
10694 + pax_track_stack();
10695 +
10696 + if ((int)size < 0)
10697 + return size;
10698
10699 -static __must_check __always_inline int
10700 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10701 + if (!__access_ok(VERIFY_READ, src, size))
10702 + return size;
10703 +
10704 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10705 + src += PAX_USER_SHADOW_BASE;
10706 +#endif
10707 +
10708 + return copy_user_generic(dst, (__force const void *)src, size);
10709 +}
10710 +
10711 +static __must_check __always_inline unsigned long
10712 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
10713 {
10714 + if ((int)size < 0)
10715 + return size;
10716 +
10717 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10718 + if (!__access_ok(VERIFY_WRITE, dst, size))
10719 + return size;
10720 +
10721 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10722 + dst += PAX_USER_SHADOW_BASE;
10723 +#endif
10724 +
10725 return copy_user_generic((__force void *)dst, src, size);
10726 }
10727
10728 -extern long __copy_user_nocache(void *dst, const void __user *src,
10729 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
10730 unsigned size, int zerorest);
10731
10732 -static inline int
10733 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10734 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10735 {
10736 might_sleep();
10737 +
10738 + if ((int)size < 0)
10739 + return size;
10740 +
10741 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10742 + if (!__access_ok(VERIFY_READ, src, size))
10743 + return size;
10744 +#endif
10745 +
10746 return __copy_user_nocache(dst, src, size, 1);
10747 }
10748
10749 -static inline int
10750 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10751 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10752 unsigned size)
10753 {
10754 + if ((int)size < 0)
10755 + return size;
10756 +
10757 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10758 + if (!__access_ok(VERIFY_READ, src, size))
10759 + return size;
10760 +#endif
10761 +
10762 return __copy_user_nocache(dst, src, size, 0);
10763 }
10764
10765 -unsigned long
10766 +extern unsigned long
10767 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
10768
10769 #endif /* _ASM_X86_UACCESS_64_H */
10770 diff -urNp linux-2.6.32.42/arch/x86/include/asm/uaccess.h linux-2.6.32.42/arch/x86/include/asm/uaccess.h
10771 --- linux-2.6.32.42/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
10772 +++ linux-2.6.32.42/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
10773 @@ -8,12 +8,15 @@
10774 #include <linux/thread_info.h>
10775 #include <linux/prefetch.h>
10776 #include <linux/string.h>
10777 +#include <linux/sched.h>
10778 #include <asm/asm.h>
10779 #include <asm/page.h>
10780
10781 #define VERIFY_READ 0
10782 #define VERIFY_WRITE 1
10783
10784 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
10785 +
10786 /*
10787 * The fs value determines whether argument validity checking should be
10788 * performed or not. If get_fs() == USER_DS, checking is performed, with
10789 @@ -29,7 +32,12 @@
10790
10791 #define get_ds() (KERNEL_DS)
10792 #define get_fs() (current_thread_info()->addr_limit)
10793 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10794 +void __set_fs(mm_segment_t x);
10795 +void set_fs(mm_segment_t x);
10796 +#else
10797 #define set_fs(x) (current_thread_info()->addr_limit = (x))
10798 +#endif
10799
10800 #define segment_eq(a, b) ((a).seg == (b).seg)
10801
10802 @@ -77,7 +85,33 @@
10803 * checks that the pointer is in the user space range - after calling
10804 * this function, memory access functions may still return -EFAULT.
10805 */
10806 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10807 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
10808 +#define access_ok(type, addr, size) \
10809 +({ \
10810 + long __size = size; \
10811 + unsigned long __addr = (unsigned long)addr; \
10812 + unsigned long __addr_ao = __addr & PAGE_MASK; \
10813 + unsigned long __end_ao = __addr + __size - 1; \
10814 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
10815 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
10816 + while(__addr_ao <= __end_ao) { \
10817 + char __c_ao; \
10818 + __addr_ao += PAGE_SIZE; \
10819 + if (__size > PAGE_SIZE) \
10820 + cond_resched(); \
10821 + if (__get_user(__c_ao, (char __user *)__addr)) \
10822 + break; \
10823 + if (type != VERIFY_WRITE) { \
10824 + __addr = __addr_ao; \
10825 + continue; \
10826 + } \
10827 + if (__put_user(__c_ao, (char __user *)__addr)) \
10828 + break; \
10829 + __addr = __addr_ao; \
10830 + } \
10831 + } \
10832 + __ret_ao; \
10833 +})
10834
10835 /*
10836 * The exception table consists of pairs of addresses: the first is the
10837 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
10838 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
10839 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
10840
10841 -
10842 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
10843 +#define __copyuser_seg "gs;"
10844 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
10845 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
10846 +#else
10847 +#define __copyuser_seg
10848 +#define __COPYUSER_SET_ES
10849 +#define __COPYUSER_RESTORE_ES
10850 +#endif
10851
10852 #ifdef CONFIG_X86_32
10853 #define __put_user_asm_u64(x, addr, err, errret) \
10854 - asm volatile("1: movl %%eax,0(%2)\n" \
10855 - "2: movl %%edx,4(%2)\n" \
10856 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
10857 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
10858 "3:\n" \
10859 ".section .fixup,\"ax\"\n" \
10860 "4: movl %3,%0\n" \
10861 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
10862 : "A" (x), "r" (addr), "i" (errret), "0" (err))
10863
10864 #define __put_user_asm_ex_u64(x, addr) \
10865 - asm volatile("1: movl %%eax,0(%1)\n" \
10866 - "2: movl %%edx,4(%1)\n" \
10867 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
10868 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
10869 "3:\n" \
10870 _ASM_EXTABLE(1b, 2b - 1b) \
10871 _ASM_EXTABLE(2b, 3b - 2b) \
10872 @@ -374,7 +416,7 @@ do { \
10873 } while (0)
10874
10875 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10876 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
10877 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
10878 "2:\n" \
10879 ".section .fixup,\"ax\"\n" \
10880 "3: mov %3,%0\n" \
10881 @@ -382,7 +424,7 @@ do { \
10882 " jmp 2b\n" \
10883 ".previous\n" \
10884 _ASM_EXTABLE(1b, 3b) \
10885 - : "=r" (err), ltype(x) \
10886 + : "=r" (err), ltype (x) \
10887 : "m" (__m(addr)), "i" (errret), "0" (err))
10888
10889 #define __get_user_size_ex(x, ptr, size) \
10890 @@ -407,7 +449,7 @@ do { \
10891 } while (0)
10892
10893 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
10894 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
10895 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
10896 "2:\n" \
10897 _ASM_EXTABLE(1b, 2b - 1b) \
10898 : ltype(x) : "m" (__m(addr)))
10899 @@ -424,13 +466,24 @@ do { \
10900 int __gu_err; \
10901 unsigned long __gu_val; \
10902 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
10903 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
10904 + (x) = (__typeof__(*(ptr)))__gu_val; \
10905 __gu_err; \
10906 })
10907
10908 /* FIXME: this hack is definitely wrong -AK */
10909 struct __large_struct { unsigned long buf[100]; };
10910 -#define __m(x) (*(struct __large_struct __user *)(x))
10911 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10912 +#define ____m(x) \
10913 +({ \
10914 + unsigned long ____x = (unsigned long)(x); \
10915 + if (____x < PAX_USER_SHADOW_BASE) \
10916 + ____x += PAX_USER_SHADOW_BASE; \
10917 + (void __user *)____x; \
10918 +})
10919 +#else
10920 +#define ____m(x) (x)
10921 +#endif
10922 +#define __m(x) (*(struct __large_struct __user *)____m(x))
10923
10924 /*
10925 * Tell gcc we read from memory instead of writing: this is because
10926 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
10927 * aliasing issues.
10928 */
10929 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
10930 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
10931 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
10932 "2:\n" \
10933 ".section .fixup,\"ax\"\n" \
10934 "3: mov %3,%0\n" \
10935 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
10936 ".previous\n" \
10937 _ASM_EXTABLE(1b, 3b) \
10938 : "=r"(err) \
10939 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
10940 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
10941
10942 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
10943 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
10944 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
10945 "2:\n" \
10946 _ASM_EXTABLE(1b, 2b - 1b) \
10947 : : ltype(x), "m" (__m(addr)))
10948 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
10949 * On error, the variable @x is set to zero.
10950 */
10951
10952 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10953 +#define __get_user(x, ptr) get_user((x), (ptr))
10954 +#else
10955 #define __get_user(x, ptr) \
10956 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
10957 +#endif
10958
10959 /**
10960 * __put_user: - Write a simple value into user space, with less checking.
10961 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
10962 * Returns zero on success, or -EFAULT on error.
10963 */
10964
10965 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10966 +#define __put_user(x, ptr) put_user((x), (ptr))
10967 +#else
10968 #define __put_user(x, ptr) \
10969 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
10970 +#endif
10971
10972 #define __get_user_unaligned __get_user
10973 #define __put_user_unaligned __put_user
10974 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
10975 #define get_user_ex(x, ptr) do { \
10976 unsigned long __gue_val; \
10977 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
10978 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
10979 + (x) = (__typeof__(*(ptr)))__gue_val; \
10980 } while (0)
10981
10982 #ifdef CONFIG_X86_WP_WORKS_OK
10983 @@ -567,6 +628,7 @@ extern struct movsl_mask {
10984
10985 #define ARCH_HAS_NOCACHE_UACCESS 1
10986
10987 +#define ARCH_HAS_SORT_EXTABLE
10988 #ifdef CONFIG_X86_32
10989 # include "uaccess_32.h"
10990 #else
10991 diff -urNp linux-2.6.32.42/arch/x86/include/asm/vgtod.h linux-2.6.32.42/arch/x86/include/asm/vgtod.h
10992 --- linux-2.6.32.42/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
10993 +++ linux-2.6.32.42/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
10994 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
10995 int sysctl_enabled;
10996 struct timezone sys_tz;
10997 struct { /* extract of a clocksource struct */
10998 + char name[8];
10999 cycle_t (*vread)(void);
11000 cycle_t cycle_last;
11001 cycle_t mask;
11002 diff -urNp linux-2.6.32.42/arch/x86/include/asm/vmi.h linux-2.6.32.42/arch/x86/include/asm/vmi.h
11003 --- linux-2.6.32.42/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11004 +++ linux-2.6.32.42/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11005 @@ -191,6 +191,7 @@ struct vrom_header {
11006 u8 reserved[96]; /* Reserved for headers */
11007 char vmi_init[8]; /* VMI_Init jump point */
11008 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11009 + char rom_data[8048]; /* rest of the option ROM */
11010 } __attribute__((packed));
11011
11012 struct pnp_header {
11013 diff -urNp linux-2.6.32.42/arch/x86/include/asm/vsyscall.h linux-2.6.32.42/arch/x86/include/asm/vsyscall.h
11014 --- linux-2.6.32.42/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11015 +++ linux-2.6.32.42/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11016 @@ -15,9 +15,10 @@ enum vsyscall_num {
11017
11018 #ifdef __KERNEL__
11019 #include <linux/seqlock.h>
11020 +#include <linux/getcpu.h>
11021 +#include <linux/time.h>
11022
11023 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11024 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11025
11026 /* Definitions for CONFIG_GENERIC_TIME definitions */
11027 #define __section_vsyscall_gtod_data __attribute__ \
11028 @@ -31,7 +32,6 @@ enum vsyscall_num {
11029 #define VGETCPU_LSL 2
11030
11031 extern int __vgetcpu_mode;
11032 -extern volatile unsigned long __jiffies;
11033
11034 /* kernel space (writeable) */
11035 extern int vgetcpu_mode;
11036 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11037
11038 extern void map_vsyscall(void);
11039
11040 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11041 +extern time_t vtime(time_t *t);
11042 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11043 #endif /* __KERNEL__ */
11044
11045 #endif /* _ASM_X86_VSYSCALL_H */
11046 diff -urNp linux-2.6.32.42/arch/x86/include/asm/xsave.h linux-2.6.32.42/arch/x86/include/asm/xsave.h
11047 --- linux-2.6.32.42/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11048 +++ linux-2.6.32.42/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11049 @@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11050 static inline int xsave_user(struct xsave_struct __user *buf)
11051 {
11052 int err;
11053 +
11054 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11055 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11056 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11057 +#endif
11058 +
11059 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11060 "2:\n"
11061 ".section .fixup,\"ax\"\n"
11062 @@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11063 u32 lmask = mask;
11064 u32 hmask = mask >> 32;
11065
11066 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11067 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11068 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11069 +#endif
11070 +
11071 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11072 "2:\n"
11073 ".section .fixup,\"ax\"\n"
11074 diff -urNp linux-2.6.32.42/arch/x86/Kconfig linux-2.6.32.42/arch/x86/Kconfig
11075 --- linux-2.6.32.42/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11076 +++ linux-2.6.32.42/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11077 @@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11078
11079 config X86_32_LAZY_GS
11080 def_bool y
11081 - depends on X86_32 && !CC_STACKPROTECTOR
11082 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11083
11084 config KTIME_SCALAR
11085 def_bool X86_32
11086 @@ -1008,7 +1008,7 @@ choice
11087
11088 config NOHIGHMEM
11089 bool "off"
11090 - depends on !X86_NUMAQ
11091 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11092 ---help---
11093 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11094 However, the address space of 32-bit x86 processors is only 4
11095 @@ -1045,7 +1045,7 @@ config NOHIGHMEM
11096
11097 config HIGHMEM4G
11098 bool "4GB"
11099 - depends on !X86_NUMAQ
11100 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11101 ---help---
11102 Select this if you have a 32-bit processor and between 1 and 4
11103 gigabytes of physical RAM.
11104 @@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11105 hex
11106 default 0xB0000000 if VMSPLIT_3G_OPT
11107 default 0x80000000 if VMSPLIT_2G
11108 - default 0x78000000 if VMSPLIT_2G_OPT
11109 + default 0x70000000 if VMSPLIT_2G_OPT
11110 default 0x40000000 if VMSPLIT_1G
11111 default 0xC0000000
11112 depends on X86_32
11113 @@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11114
11115 config EFI
11116 bool "EFI runtime service support"
11117 - depends on ACPI
11118 + depends on ACPI && !PAX_KERNEXEC
11119 ---help---
11120 This enables the kernel to use EFI runtime services that are
11121 available (such as the EFI variable services).
11122 @@ -1460,6 +1460,7 @@ config SECCOMP
11123
11124 config CC_STACKPROTECTOR
11125 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11126 + depends on X86_64 || !PAX_MEMORY_UDEREF
11127 ---help---
11128 This option turns on the -fstack-protector GCC feature. This
11129 feature puts, at the beginning of functions, a canary value on
11130 @@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11131 config PHYSICAL_START
11132 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11133 default "0x1000000"
11134 + range 0x400000 0x40000000
11135 ---help---
11136 This gives the physical address where the kernel is loaded.
11137
11138 @@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11139 hex
11140 prompt "Alignment value to which kernel should be aligned" if X86_32
11141 default "0x1000000"
11142 + range 0x400000 0x1000000 if PAX_KERNEXEC
11143 range 0x2000 0x1000000
11144 ---help---
11145 This value puts the alignment restrictions on physical address
11146 @@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11147 Say N if you want to disable CPU hotplug.
11148
11149 config COMPAT_VDSO
11150 - def_bool y
11151 + def_bool n
11152 prompt "Compat VDSO support"
11153 depends on X86_32 || IA32_EMULATION
11154 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11155 ---help---
11156 Map the 32-bit VDSO to the predictable old-style address too.
11157 ---help---
11158 diff -urNp linux-2.6.32.42/arch/x86/Kconfig.cpu linux-2.6.32.42/arch/x86/Kconfig.cpu
11159 --- linux-2.6.32.42/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11160 +++ linux-2.6.32.42/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11161 @@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11162
11163 config X86_F00F_BUG
11164 def_bool y
11165 - depends on M586MMX || M586TSC || M586 || M486 || M386
11166 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11167
11168 config X86_WP_WORKS_OK
11169 def_bool y
11170 @@ -360,7 +360,7 @@ config X86_POPAD_OK
11171
11172 config X86_ALIGNMENT_16
11173 def_bool y
11174 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11175 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11176
11177 config X86_INTEL_USERCOPY
11178 def_bool y
11179 @@ -406,7 +406,7 @@ config X86_CMPXCHG64
11180 # generates cmov.
11181 config X86_CMOV
11182 def_bool y
11183 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11184 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11185
11186 config X86_MINIMUM_CPU_FAMILY
11187 int
11188 diff -urNp linux-2.6.32.42/arch/x86/Kconfig.debug linux-2.6.32.42/arch/x86/Kconfig.debug
11189 --- linux-2.6.32.42/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11190 +++ linux-2.6.32.42/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11191 @@ -99,7 +99,7 @@ config X86_PTDUMP
11192 config DEBUG_RODATA
11193 bool "Write protect kernel read-only data structures"
11194 default y
11195 - depends on DEBUG_KERNEL
11196 + depends on DEBUG_KERNEL && BROKEN
11197 ---help---
11198 Mark the kernel read-only data as write-protected in the pagetables,
11199 in order to catch accidental (and incorrect) writes to such const
11200 diff -urNp linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S
11201 --- linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11202 +++ linux-2.6.32.42/arch/x86/kernel/acpi/realmode/wakeup.S 2011-04-17 15:56:46.000000000 -0400
11203 @@ -104,7 +104,7 @@ _start:
11204 movl %eax, %ecx
11205 orl %edx, %ecx
11206 jz 1f
11207 - movl $0xc0000080, %ecx
11208 + mov $MSR_EFER, %ecx
11209 wrmsr
11210 1:
11211
11212 diff -urNp linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c
11213 --- linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11214 +++ linux-2.6.32.42/arch/x86/kernel/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
11215 @@ -11,11 +11,12 @@
11216 #include <linux/cpumask.h>
11217 #include <asm/segment.h>
11218 #include <asm/desc.h>
11219 +#include <asm/e820.h>
11220
11221 #include "realmode/wakeup.h"
11222 #include "sleep.h"
11223
11224 -unsigned long acpi_wakeup_address;
11225 +unsigned long acpi_wakeup_address = 0x2000;
11226 unsigned long acpi_realmode_flags;
11227
11228 /* address in low memory of the wakeup routine. */
11229 @@ -99,8 +100,12 @@ int acpi_save_state_mem(void)
11230 header->trampoline_segment = setup_trampoline() >> 4;
11231 #ifdef CONFIG_SMP
11232 stack_start.sp = temp_stack + sizeof(temp_stack);
11233 +
11234 + pax_open_kernel();
11235 early_gdt_descr.address =
11236 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11237 + pax_close_kernel();
11238 +
11239 initial_gs = per_cpu_offset(smp_processor_id());
11240 #endif
11241 initial_code = (unsigned long)wakeup_long64;
11242 @@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11243 return;
11244 }
11245
11246 - acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11247 -
11248 - if (!acpi_realmode) {
11249 - printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11250 - return;
11251 - }
11252 -
11253 - acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11254 + reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11255 + acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
11256 }
11257
11258
11259 diff -urNp linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S
11260 --- linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11261 +++ linux-2.6.32.42/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11262 @@ -30,13 +30,11 @@ wakeup_pmode_return:
11263 # and restore the stack ... but you need gdt for this to work
11264 movl saved_context_esp, %esp
11265
11266 - movl %cs:saved_magic, %eax
11267 - cmpl $0x12345678, %eax
11268 + cmpl $0x12345678, saved_magic
11269 jne bogus_magic
11270
11271 # jump to place where we left off
11272 - movl saved_eip, %eax
11273 - jmp *%eax
11274 + jmp *(saved_eip)
11275
11276 bogus_magic:
11277 jmp bogus_magic
11278 diff -urNp linux-2.6.32.42/arch/x86/kernel/alternative.c linux-2.6.32.42/arch/x86/kernel/alternative.c
11279 --- linux-2.6.32.42/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11280 +++ linux-2.6.32.42/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11281 @@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11282
11283 BUG_ON(p->len > MAX_PATCH_LEN);
11284 /* prep the buffer with the original instructions */
11285 - memcpy(insnbuf, p->instr, p->len);
11286 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11287 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11288 (unsigned long)p->instr, p->len);
11289
11290 @@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11291 if (smp_alt_once)
11292 free_init_pages("SMP alternatives",
11293 (unsigned long)__smp_locks,
11294 - (unsigned long)__smp_locks_end);
11295 + PAGE_ALIGN((unsigned long)__smp_locks_end));
11296
11297 restart_nmi();
11298 }
11299 @@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11300 * instructions. And on the local CPU you need to be protected again NMI or MCE
11301 * handlers seeing an inconsistent instruction while you patch.
11302 */
11303 -static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11304 +static void *__kprobes text_poke_early(void *addr, const void *opcode,
11305 size_t len)
11306 {
11307 unsigned long flags;
11308 local_irq_save(flags);
11309 - memcpy(addr, opcode, len);
11310 +
11311 + pax_open_kernel();
11312 + memcpy(ktla_ktva(addr), opcode, len);
11313 sync_core();
11314 + pax_close_kernel();
11315 +
11316 local_irq_restore(flags);
11317 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11318 that causes hangs on some VIA CPUs. */
11319 @@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11320 */
11321 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11322 {
11323 - unsigned long flags;
11324 - char *vaddr;
11325 + unsigned char *vaddr = ktla_ktva(addr);
11326 struct page *pages[2];
11327 - int i;
11328 + size_t i;
11329
11330 if (!core_kernel_text((unsigned long)addr)) {
11331 - pages[0] = vmalloc_to_page(addr);
11332 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11333 + pages[0] = vmalloc_to_page(vaddr);
11334 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11335 } else {
11336 - pages[0] = virt_to_page(addr);
11337 + pages[0] = virt_to_page(vaddr);
11338 WARN_ON(!PageReserved(pages[0]));
11339 - pages[1] = virt_to_page(addr + PAGE_SIZE);
11340 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11341 }
11342 BUG_ON(!pages[0]);
11343 - local_irq_save(flags);
11344 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11345 - if (pages[1])
11346 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11347 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11348 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11349 - clear_fixmap(FIX_TEXT_POKE0);
11350 - if (pages[1])
11351 - clear_fixmap(FIX_TEXT_POKE1);
11352 - local_flush_tlb();
11353 - sync_core();
11354 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
11355 - that causes hangs on some VIA CPUs. */
11356 + text_poke_early(addr, opcode, len);
11357 for (i = 0; i < len; i++)
11358 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11359 - local_irq_restore(flags);
11360 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11361 return addr;
11362 }
11363 diff -urNp linux-2.6.32.42/arch/x86/kernel/amd_iommu.c linux-2.6.32.42/arch/x86/kernel/amd_iommu.c
11364 --- linux-2.6.32.42/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11365 +++ linux-2.6.32.42/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11366 @@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11367 }
11368 }
11369
11370 -static struct dma_map_ops amd_iommu_dma_ops = {
11371 +static const struct dma_map_ops amd_iommu_dma_ops = {
11372 .alloc_coherent = alloc_coherent,
11373 .free_coherent = free_coherent,
11374 .map_page = map_page,
11375 diff -urNp linux-2.6.32.42/arch/x86/kernel/apic/apic.c linux-2.6.32.42/arch/x86/kernel/apic/apic.c
11376 --- linux-2.6.32.42/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11377 +++ linux-2.6.32.42/arch/x86/kernel/apic/apic.c 2011-05-16 21:46:57.000000000 -0400
11378 @@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11379 apic_write(APIC_ESR, 0);
11380 v1 = apic_read(APIC_ESR);
11381 ack_APIC_irq();
11382 - atomic_inc(&irq_err_count);
11383 + atomic_inc_unchecked(&irq_err_count);
11384
11385 /*
11386 * Here is what the APIC error bits mean:
11387 @@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11388 u16 *bios_cpu_apicid;
11389 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11390
11391 + pax_track_stack();
11392 +
11393 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11394 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11395
11396 diff -urNp linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c
11397 --- linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11398 +++ linux-2.6.32.42/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11399 @@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11400 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11401 GFP_ATOMIC);
11402 if (!ioapic_entries)
11403 - return 0;
11404 + return NULL;
11405
11406 for (apic = 0; apic < nr_ioapics; apic++) {
11407 ioapic_entries[apic] =
11408 @@ -733,7 +733,7 @@ nomem:
11409 kfree(ioapic_entries[apic]);
11410 kfree(ioapic_entries);
11411
11412 - return 0;
11413 + return NULL;
11414 }
11415
11416 /*
11417 @@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11418 }
11419 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11420
11421 -void lock_vector_lock(void)
11422 +void lock_vector_lock(void) __acquires(vector_lock)
11423 {
11424 /* Used to the online set of cpus does not change
11425 * during assign_irq_vector.
11426 @@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11427 spin_lock(&vector_lock);
11428 }
11429
11430 -void unlock_vector_lock(void)
11431 +void unlock_vector_lock(void) __releases(vector_lock)
11432 {
11433 spin_unlock(&vector_lock);
11434 }
11435 @@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
11436 ack_APIC_irq();
11437 }
11438
11439 -atomic_t irq_mis_count;
11440 +atomic_unchecked_t irq_mis_count;
11441
11442 static void ack_apic_level(unsigned int irq)
11443 {
11444 @@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
11445
11446 /* Tail end of version 0x11 I/O APIC bug workaround */
11447 if (!(v & (1 << (i & 0x1f)))) {
11448 - atomic_inc(&irq_mis_count);
11449 + atomic_inc_unchecked(&irq_mis_count);
11450 spin_lock(&ioapic_lock);
11451 __mask_and_edge_IO_APIC_irq(cfg);
11452 __unmask_and_level_IO_APIC_irq(cfg);
11453 diff -urNp linux-2.6.32.42/arch/x86/kernel/apm_32.c linux-2.6.32.42/arch/x86/kernel/apm_32.c
11454 --- linux-2.6.32.42/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
11455 +++ linux-2.6.32.42/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
11456 @@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
11457 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11458 * even though they are called in protected mode.
11459 */
11460 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11461 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11462 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11463
11464 static const char driver_version[] = "1.16ac"; /* no spaces */
11465 @@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
11466 BUG_ON(cpu != 0);
11467 gdt = get_cpu_gdt_table(cpu);
11468 save_desc_40 = gdt[0x40 / 8];
11469 +
11470 + pax_open_kernel();
11471 gdt[0x40 / 8] = bad_bios_desc;
11472 + pax_close_kernel();
11473
11474 apm_irq_save(flags);
11475 APM_DO_SAVE_SEGS;
11476 @@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
11477 &call->esi);
11478 APM_DO_RESTORE_SEGS;
11479 apm_irq_restore(flags);
11480 +
11481 + pax_open_kernel();
11482 gdt[0x40 / 8] = save_desc_40;
11483 + pax_close_kernel();
11484 +
11485 put_cpu();
11486
11487 return call->eax & 0xff;
11488 @@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
11489 BUG_ON(cpu != 0);
11490 gdt = get_cpu_gdt_table(cpu);
11491 save_desc_40 = gdt[0x40 / 8];
11492 +
11493 + pax_open_kernel();
11494 gdt[0x40 / 8] = bad_bios_desc;
11495 + pax_close_kernel();
11496
11497 apm_irq_save(flags);
11498 APM_DO_SAVE_SEGS;
11499 @@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
11500 &call->eax);
11501 APM_DO_RESTORE_SEGS;
11502 apm_irq_restore(flags);
11503 +
11504 + pax_open_kernel();
11505 gdt[0x40 / 8] = save_desc_40;
11506 + pax_close_kernel();
11507 +
11508 put_cpu();
11509 return error;
11510 }
11511 @@ -975,7 +989,7 @@ recalc:
11512
11513 static void apm_power_off(void)
11514 {
11515 - unsigned char po_bios_call[] = {
11516 + const unsigned char po_bios_call[] = {
11517 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
11518 0x8e, 0xd0, /* movw ax,ss */
11519 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
11520 @@ -2357,12 +2371,15 @@ static int __init apm_init(void)
11521 * code to that CPU.
11522 */
11523 gdt = get_cpu_gdt_table(0);
11524 +
11525 + pax_open_kernel();
11526 set_desc_base(&gdt[APM_CS >> 3],
11527 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11528 set_desc_base(&gdt[APM_CS_16 >> 3],
11529 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11530 set_desc_base(&gdt[APM_DS >> 3],
11531 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11532 + pax_close_kernel();
11533
11534 proc_create("apm", 0, NULL, &apm_file_ops);
11535
11536 diff -urNp linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c
11537 --- linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
11538 +++ linux-2.6.32.42/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
11539 @@ -51,7 +51,6 @@ void foo(void)
11540 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
11541 BLANK();
11542
11543 - OFFSET(TI_task, thread_info, task);
11544 OFFSET(TI_exec_domain, thread_info, exec_domain);
11545 OFFSET(TI_flags, thread_info, flags);
11546 OFFSET(TI_status, thread_info, status);
11547 @@ -60,6 +59,8 @@ void foo(void)
11548 OFFSET(TI_restart_block, thread_info, restart_block);
11549 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
11550 OFFSET(TI_cpu, thread_info, cpu);
11551 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11552 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11553 BLANK();
11554
11555 OFFSET(GDS_size, desc_ptr, size);
11556 @@ -99,6 +100,7 @@ void foo(void)
11557
11558 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11559 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11560 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11561 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
11562 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
11563 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
11564 @@ -115,6 +117,11 @@ void foo(void)
11565 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
11566 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11567 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11568 +
11569 +#ifdef CONFIG_PAX_KERNEXEC
11570 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11571 +#endif
11572 +
11573 #endif
11574
11575 #ifdef CONFIG_XEN
11576 diff -urNp linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c
11577 --- linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
11578 +++ linux-2.6.32.42/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
11579 @@ -44,6 +44,8 @@ int main(void)
11580 ENTRY(addr_limit);
11581 ENTRY(preempt_count);
11582 ENTRY(status);
11583 + ENTRY(lowest_stack);
11584 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11585 #ifdef CONFIG_IA32_EMULATION
11586 ENTRY(sysenter_return);
11587 #endif
11588 @@ -63,6 +65,18 @@ int main(void)
11589 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11590 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
11591 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11592 +
11593 +#ifdef CONFIG_PAX_KERNEXEC
11594 + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11595 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11596 +#endif
11597 +
11598 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11599 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11600 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11601 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
11602 +#endif
11603 +
11604 #endif
11605
11606
11607 @@ -115,6 +129,7 @@ int main(void)
11608 ENTRY(cr8);
11609 BLANK();
11610 #undef ENTRY
11611 + DEFINE(TSS_size, sizeof(struct tss_struct));
11612 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
11613 BLANK();
11614 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
11615 @@ -130,6 +145,7 @@ int main(void)
11616
11617 BLANK();
11618 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11619 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11620 #ifdef CONFIG_XEN
11621 BLANK();
11622 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
11623 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/amd.c linux-2.6.32.42/arch/x86/kernel/cpu/amd.c
11624 --- linux-2.6.32.42/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
11625 +++ linux-2.6.32.42/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
11626 @@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
11627 unsigned int size)
11628 {
11629 /* AMD errata T13 (order #21922) */
11630 - if ((c->x86 == 6)) {
11631 + if (c->x86 == 6) {
11632 /* Duron Rev A0 */
11633 if (c->x86_model == 3 && c->x86_mask == 0)
11634 size = 64;
11635 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/common.c linux-2.6.32.42/arch/x86/kernel/cpu/common.c
11636 --- linux-2.6.32.42/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
11637 +++ linux-2.6.32.42/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
11638 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
11639
11640 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11641
11642 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11643 -#ifdef CONFIG_X86_64
11644 - /*
11645 - * We need valid kernel segments for data and code in long mode too
11646 - * IRET will check the segment types kkeil 2000/10/28
11647 - * Also sysret mandates a special GDT layout
11648 - *
11649 - * TLS descriptors are currently at a different place compared to i386.
11650 - * Hopefully nobody expects them at a fixed place (Wine?)
11651 - */
11652 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11653 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11654 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11655 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11656 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11657 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11658 -#else
11659 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11660 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11661 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11662 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11663 - /*
11664 - * Segments used for calling PnP BIOS have byte granularity.
11665 - * They code segments and data segments have fixed 64k limits,
11666 - * the transfer segment sizes are set at run time.
11667 - */
11668 - /* 32-bit code */
11669 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11670 - /* 16-bit code */
11671 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11672 - /* 16-bit data */
11673 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11674 - /* 16-bit data */
11675 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11676 - /* 16-bit data */
11677 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11678 - /*
11679 - * The APM segments have byte granularity and their bases
11680 - * are set at run time. All have 64k limits.
11681 - */
11682 - /* 32-bit code */
11683 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11684 - /* 16-bit code */
11685 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11686 - /* data */
11687 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11688 -
11689 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11690 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11691 - GDT_STACK_CANARY_INIT
11692 -#endif
11693 -} };
11694 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11695 -
11696 static int __init x86_xsave_setup(char *s)
11697 {
11698 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
11699 @@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
11700 {
11701 struct desc_ptr gdt_descr;
11702
11703 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11704 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11705 gdt_descr.size = GDT_SIZE - 1;
11706 load_gdt(&gdt_descr);
11707 /* Reload the per-cpu base */
11708 @@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
11709 /* Filter out anything that depends on CPUID levels we don't have */
11710 filter_cpuid_features(c, true);
11711
11712 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
11713 + setup_clear_cpu_cap(X86_FEATURE_SEP);
11714 +#endif
11715 +
11716 /* If the model name is still unset, do table lookup. */
11717 if (!c->x86_model_id[0]) {
11718 const char *p;
11719 @@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
11720 }
11721 __setup("clearcpuid=", setup_disablecpuid);
11722
11723 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11724 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
11725 +
11726 #ifdef CONFIG_X86_64
11727 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11728
11729 @@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
11730 EXPORT_PER_CPU_SYMBOL(current_task);
11731
11732 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11733 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
11734 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
11735 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11736
11737 DEFINE_PER_CPU(char *, irq_stack_ptr) =
11738 @@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
11739 {
11740 memset(regs, 0, sizeof(struct pt_regs));
11741 regs->fs = __KERNEL_PERCPU;
11742 - regs->gs = __KERNEL_STACK_CANARY;
11743 + savesegment(gs, regs->gs);
11744
11745 return regs;
11746 }
11747 @@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
11748 int i;
11749
11750 cpu = stack_smp_processor_id();
11751 - t = &per_cpu(init_tss, cpu);
11752 + t = init_tss + cpu;
11753 orig_ist = &per_cpu(orig_ist, cpu);
11754
11755 #ifdef CONFIG_NUMA
11756 @@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
11757 switch_to_new_gdt(cpu);
11758 loadsegment(fs, 0);
11759
11760 - load_idt((const struct desc_ptr *)&idt_descr);
11761 + load_idt(&idt_descr);
11762
11763 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
11764 syscall_init();
11765 @@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
11766 wrmsrl(MSR_KERNEL_GS_BASE, 0);
11767 barrier();
11768
11769 - check_efer();
11770 if (cpu != 0)
11771 enable_x2apic();
11772
11773 @@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
11774 {
11775 int cpu = smp_processor_id();
11776 struct task_struct *curr = current;
11777 - struct tss_struct *t = &per_cpu(init_tss, cpu);
11778 + struct tss_struct *t = init_tss + cpu;
11779 struct thread_struct *thread = &curr->thread;
11780
11781 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
11782 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/intel.c linux-2.6.32.42/arch/x86/kernel/cpu/intel.c
11783 --- linux-2.6.32.42/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
11784 +++ linux-2.6.32.42/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
11785 @@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
11786 * Update the IDT descriptor and reload the IDT so that
11787 * it uses the read-only mapped virtual address.
11788 */
11789 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
11790 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
11791 load_idt(&idt_descr);
11792 }
11793 #endif
11794 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c
11795 --- linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
11796 +++ linux-2.6.32.42/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
11797 @@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
11798 return ret;
11799 }
11800
11801 -static struct sysfs_ops sysfs_ops = {
11802 +static const struct sysfs_ops sysfs_ops = {
11803 .show = show,
11804 .store = store,
11805 };
11806 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/Makefile linux-2.6.32.42/arch/x86/kernel/cpu/Makefile
11807 --- linux-2.6.32.42/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
11808 +++ linux-2.6.32.42/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
11809 @@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
11810 CFLAGS_REMOVE_common.o = -pg
11811 endif
11812
11813 -# Make sure load_percpu_segment has no stackprotector
11814 -nostackp := $(call cc-option, -fno-stack-protector)
11815 -CFLAGS_common.o := $(nostackp)
11816 -
11817 obj-y := intel_cacheinfo.o addon_cpuid_features.o
11818 obj-y += proc.o capflags.o powerflags.o common.o
11819 obj-y += vmware.o hypervisor.o sched.o
11820 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c
11821 --- linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
11822 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
11823 @@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
11824 return ret;
11825 }
11826
11827 -static struct sysfs_ops threshold_ops = {
11828 +static const struct sysfs_ops threshold_ops = {
11829 .show = show,
11830 .store = store,
11831 };
11832 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c
11833 --- linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
11834 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
11835 @@ -43,6 +43,7 @@
11836 #include <asm/ipi.h>
11837 #include <asm/mce.h>
11838 #include <asm/msr.h>
11839 +#include <asm/local.h>
11840
11841 #include "mce-internal.h"
11842
11843 @@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
11844 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
11845 m->cs, m->ip);
11846
11847 - if (m->cs == __KERNEL_CS)
11848 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
11849 print_symbol("{%s}", m->ip);
11850 pr_cont("\n");
11851 }
11852 @@ -221,10 +222,10 @@ static void print_mce_tail(void)
11853
11854 #define PANIC_TIMEOUT 5 /* 5 seconds */
11855
11856 -static atomic_t mce_paniced;
11857 +static atomic_unchecked_t mce_paniced;
11858
11859 static int fake_panic;
11860 -static atomic_t mce_fake_paniced;
11861 +static atomic_unchecked_t mce_fake_paniced;
11862
11863 /* Panic in progress. Enable interrupts and wait for final IPI */
11864 static void wait_for_panic(void)
11865 @@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
11866 /*
11867 * Make sure only one CPU runs in machine check panic
11868 */
11869 - if (atomic_inc_return(&mce_paniced) > 1)
11870 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
11871 wait_for_panic();
11872 barrier();
11873
11874 @@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
11875 console_verbose();
11876 } else {
11877 /* Don't log too much for fake panic */
11878 - if (atomic_inc_return(&mce_fake_paniced) > 1)
11879 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
11880 return;
11881 }
11882 print_mce_head();
11883 @@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
11884 * might have been modified by someone else.
11885 */
11886 rmb();
11887 - if (atomic_read(&mce_paniced))
11888 + if (atomic_read_unchecked(&mce_paniced))
11889 wait_for_panic();
11890 if (!monarch_timeout)
11891 goto out;
11892 @@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
11893 */
11894
11895 static DEFINE_SPINLOCK(mce_state_lock);
11896 -static int open_count; /* #times opened */
11897 +static local_t open_count; /* #times opened */
11898 static int open_exclu; /* already open exclusive? */
11899
11900 static int mce_open(struct inode *inode, struct file *file)
11901 {
11902 spin_lock(&mce_state_lock);
11903
11904 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
11905 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
11906 spin_unlock(&mce_state_lock);
11907
11908 return -EBUSY;
11909 @@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
11910
11911 if (file->f_flags & O_EXCL)
11912 open_exclu = 1;
11913 - open_count++;
11914 + local_inc(&open_count);
11915
11916 spin_unlock(&mce_state_lock);
11917
11918 @@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
11919 {
11920 spin_lock(&mce_state_lock);
11921
11922 - open_count--;
11923 + local_dec(&open_count);
11924 open_exclu = 0;
11925
11926 spin_unlock(&mce_state_lock);
11927 @@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
11928 static void mce_reset(void)
11929 {
11930 cpu_missing = 0;
11931 - atomic_set(&mce_fake_paniced, 0);
11932 + atomic_set_unchecked(&mce_fake_paniced, 0);
11933 atomic_set(&mce_executing, 0);
11934 atomic_set(&mce_callin, 0);
11935 atomic_set(&global_nwo, 0);
11936 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c
11937 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
11938 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
11939 @@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
11940 return 0;
11941 }
11942
11943 -static struct mtrr_ops amd_mtrr_ops = {
11944 +static const struct mtrr_ops amd_mtrr_ops = {
11945 .vendor = X86_VENDOR_AMD,
11946 .set = amd_set_mtrr,
11947 .get = amd_get_mtrr,
11948 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c
11949 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
11950 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
11951 @@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
11952 return 0;
11953 }
11954
11955 -static struct mtrr_ops centaur_mtrr_ops = {
11956 +static const struct mtrr_ops centaur_mtrr_ops = {
11957 .vendor = X86_VENDOR_CENTAUR,
11958 .set = centaur_set_mcr,
11959 .get = centaur_get_mcr,
11960 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c
11961 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
11962 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
11963 @@ -265,7 +265,7 @@ static void cyrix_set_all(void)
11964 post_set();
11965 }
11966
11967 -static struct mtrr_ops cyrix_mtrr_ops = {
11968 +static const struct mtrr_ops cyrix_mtrr_ops = {
11969 .vendor = X86_VENDOR_CYRIX,
11970 .set_all = cyrix_set_all,
11971 .set = cyrix_set_arr,
11972 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c
11973 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
11974 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
11975 @@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
11976 /*
11977 * Generic structure...
11978 */
11979 -struct mtrr_ops generic_mtrr_ops = {
11980 +const struct mtrr_ops generic_mtrr_ops = {
11981 .use_intel_if = 1,
11982 .set_all = generic_set_all,
11983 .get = generic_get_mtrr,
11984 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c
11985 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
11986 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
11987 @@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
11988 u64 size_or_mask, size_and_mask;
11989 static bool mtrr_aps_delayed_init;
11990
11991 -static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
11992 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
11993
11994 -struct mtrr_ops *mtrr_if;
11995 +const struct mtrr_ops *mtrr_if;
11996
11997 static void set_mtrr(unsigned int reg, unsigned long base,
11998 unsigned long size, mtrr_type type);
11999
12000 -void set_mtrr_ops(struct mtrr_ops *ops)
12001 +void set_mtrr_ops(const struct mtrr_ops *ops)
12002 {
12003 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12004 mtrr_ops[ops->vendor] = ops;
12005 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h
12006 --- linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12007 +++ linux-2.6.32.42/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
12008 @@ -12,19 +12,19 @@
12009 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
12010
12011 struct mtrr_ops {
12012 - u32 vendor;
12013 - u32 use_intel_if;
12014 - void (*set)(unsigned int reg, unsigned long base,
12015 + const u32 vendor;
12016 + const u32 use_intel_if;
12017 + void (* const set)(unsigned int reg, unsigned long base,
12018 unsigned long size, mtrr_type type);
12019 - void (*set_all)(void);
12020 + void (* const set_all)(void);
12021
12022 - void (*get)(unsigned int reg, unsigned long *base,
12023 + void (* const get)(unsigned int reg, unsigned long *base,
12024 unsigned long *size, mtrr_type *type);
12025 - int (*get_free_region)(unsigned long base, unsigned long size,
12026 + int (* const get_free_region)(unsigned long base, unsigned long size,
12027 int replace_reg);
12028 - int (*validate_add_page)(unsigned long base, unsigned long size,
12029 + int (* const validate_add_page)(unsigned long base, unsigned long size,
12030 unsigned int type);
12031 - int (*have_wrcomb)(void);
12032 + int (* const have_wrcomb)(void);
12033 };
12034
12035 extern int generic_get_free_region(unsigned long base, unsigned long size,
12036 @@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
12037 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12038 unsigned int type);
12039
12040 -extern struct mtrr_ops generic_mtrr_ops;
12041 +extern const struct mtrr_ops generic_mtrr_ops;
12042
12043 extern int positive_have_wrcomb(void);
12044
12045 @@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12046 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12047 void get_mtrr_state(void);
12048
12049 -extern void set_mtrr_ops(struct mtrr_ops *ops);
12050 +extern void set_mtrr_ops(const struct mtrr_ops *ops);
12051
12052 extern u64 size_or_mask, size_and_mask;
12053 -extern struct mtrr_ops *mtrr_if;
12054 +extern const struct mtrr_ops *mtrr_if;
12055
12056 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12057 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
12058 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c
12059 --- linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12060 +++ linux-2.6.32.42/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12061 @@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12062
12063 /* Interface defining a CPU specific perfctr watchdog */
12064 struct wd_ops {
12065 - int (*reserve)(void);
12066 - void (*unreserve)(void);
12067 - int (*setup)(unsigned nmi_hz);
12068 - void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12069 - void (*stop)(void);
12070 + int (* const reserve)(void);
12071 + void (* const unreserve)(void);
12072 + int (* const setup)(unsigned nmi_hz);
12073 + void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12074 + void (* const stop)(void);
12075 unsigned perfctr;
12076 unsigned evntsel;
12077 u64 checkbit;
12078 @@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12079 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12080 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12081
12082 +/* cannot be const */
12083 static struct wd_ops intel_arch_wd_ops;
12084
12085 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12086 @@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12087 return 1;
12088 }
12089
12090 +/* cannot be const */
12091 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12092 .reserve = single_msr_reserve,
12093 .unreserve = single_msr_unreserve,
12094 diff -urNp linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c
12095 --- linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12096 +++ linux-2.6.32.42/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12097 @@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12098 * count to the generic event atomically:
12099 */
12100 again:
12101 - prev_raw_count = atomic64_read(&hwc->prev_count);
12102 + prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12103 rdmsrl(hwc->event_base + idx, new_raw_count);
12104
12105 - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12106 + if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12107 new_raw_count) != prev_raw_count)
12108 goto again;
12109
12110 @@ -741,7 +741,7 @@ again:
12111 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12112 delta >>= shift;
12113
12114 - atomic64_add(delta, &event->count);
12115 + atomic64_add_unchecked(delta, &event->count);
12116 atomic64_sub(delta, &hwc->period_left);
12117
12118 return new_raw_count;
12119 @@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12120 * The hw event starts counting from this event offset,
12121 * mark it to be able to extra future deltas:
12122 */
12123 - atomic64_set(&hwc->prev_count, (u64)-left);
12124 + atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12125
12126 err = checking_wrmsrl(hwc->event_base + idx,
12127 (u64)(-left) & x86_pmu.event_mask);
12128 @@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12129 break;
12130
12131 callchain_store(entry, frame.return_address);
12132 - fp = frame.next_frame;
12133 + fp = (__force const void __user *)frame.next_frame;
12134 }
12135 }
12136
12137 diff -urNp linux-2.6.32.42/arch/x86/kernel/crash.c linux-2.6.32.42/arch/x86/kernel/crash.c
12138 --- linux-2.6.32.42/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12139 +++ linux-2.6.32.42/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12140 @@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12141 regs = args->regs;
12142
12143 #ifdef CONFIG_X86_32
12144 - if (!user_mode_vm(regs)) {
12145 + if (!user_mode(regs)) {
12146 crash_fixup_ss_esp(&fixed_regs, regs);
12147 regs = &fixed_regs;
12148 }
12149 diff -urNp linux-2.6.32.42/arch/x86/kernel/doublefault_32.c linux-2.6.32.42/arch/x86/kernel/doublefault_32.c
12150 --- linux-2.6.32.42/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12151 +++ linux-2.6.32.42/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12152 @@ -11,7 +11,7 @@
12153
12154 #define DOUBLEFAULT_STACKSIZE (1024)
12155 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12156 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12157 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12158
12159 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12160
12161 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
12162 unsigned long gdt, tss;
12163
12164 store_gdt(&gdt_desc);
12165 - gdt = gdt_desc.address;
12166 + gdt = (unsigned long)gdt_desc.address;
12167
12168 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12169
12170 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12171 /* 0x2 bit is always set */
12172 .flags = X86_EFLAGS_SF | 0x2,
12173 .sp = STACK_START,
12174 - .es = __USER_DS,
12175 + .es = __KERNEL_DS,
12176 .cs = __KERNEL_CS,
12177 .ss = __KERNEL_DS,
12178 - .ds = __USER_DS,
12179 + .ds = __KERNEL_DS,
12180 .fs = __KERNEL_PERCPU,
12181
12182 .__cr3 = __pa_nodebug(swapper_pg_dir),
12183 diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c
12184 --- linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12185 +++ linux-2.6.32.42/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12186 @@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12187 #endif
12188
12189 for (;;) {
12190 - struct thread_info *context;
12191 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12192 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12193
12194 - context = (struct thread_info *)
12195 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12196 - bp = print_context_stack(context, stack, bp, ops,
12197 - data, NULL, &graph);
12198 -
12199 - stack = (unsigned long *)context->previous_esp;
12200 - if (!stack)
12201 + if (stack_start == task_stack_page(task))
12202 break;
12203 + stack = *(unsigned long **)stack_start;
12204 if (ops->stack(data, "IRQ") < 0)
12205 break;
12206 touch_nmi_watchdog();
12207 @@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12208 * When in-kernel, we also print out the stack and code at the
12209 * time of the fault..
12210 */
12211 - if (!user_mode_vm(regs)) {
12212 + if (!user_mode(regs)) {
12213 unsigned int code_prologue = code_bytes * 43 / 64;
12214 unsigned int code_len = code_bytes;
12215 unsigned char c;
12216 u8 *ip;
12217 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12218
12219 printk(KERN_EMERG "Stack:\n");
12220 show_stack_log_lvl(NULL, regs, &regs->sp,
12221 @@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12222
12223 printk(KERN_EMERG "Code: ");
12224
12225 - ip = (u8 *)regs->ip - code_prologue;
12226 + ip = (u8 *)regs->ip - code_prologue + cs_base;
12227 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12228 /* try starting at IP */
12229 - ip = (u8 *)regs->ip;
12230 + ip = (u8 *)regs->ip + cs_base;
12231 code_len = code_len - code_prologue + 1;
12232 }
12233 for (i = 0; i < code_len; i++, ip++) {
12234 @@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12235 printk(" Bad EIP value.");
12236 break;
12237 }
12238 - if (ip == (u8 *)regs->ip)
12239 + if (ip == (u8 *)regs->ip + cs_base)
12240 printk("<%02x> ", c);
12241 else
12242 printk("%02x ", c);
12243 @@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12244 {
12245 unsigned short ud2;
12246
12247 + ip = ktla_ktva(ip);
12248 if (ip < PAGE_OFFSET)
12249 return 0;
12250 if (probe_kernel_address((unsigned short *)ip, ud2))
12251 diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c
12252 --- linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12253 +++ linux-2.6.32.42/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12254 @@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12255 unsigned long *irq_stack_end =
12256 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12257 unsigned used = 0;
12258 - struct thread_info *tinfo;
12259 int graph = 0;
12260 + void *stack_start;
12261
12262 if (!task)
12263 task = current;
12264 @@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12265 * current stack address. If the stacks consist of nested
12266 * exceptions
12267 */
12268 - tinfo = task_thread_info(task);
12269 for (;;) {
12270 char *id;
12271 unsigned long *estack_end;
12272 +
12273 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12274 &used, &id);
12275
12276 @@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12277 if (ops->stack(data, id) < 0)
12278 break;
12279
12280 - bp = print_context_stack(tinfo, stack, bp, ops,
12281 + bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12282 data, estack_end, &graph);
12283 ops->stack(data, "<EOE>");
12284 /*
12285 @@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12286 if (stack >= irq_stack && stack < irq_stack_end) {
12287 if (ops->stack(data, "IRQ") < 0)
12288 break;
12289 - bp = print_context_stack(tinfo, stack, bp,
12290 + bp = print_context_stack(task, irq_stack, stack, bp,
12291 ops, data, irq_stack_end, &graph);
12292 /*
12293 * We link to the next stack (which would be
12294 @@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12295 /*
12296 * This handles the process stack:
12297 */
12298 - bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12299 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12300 + bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12301 put_cpu();
12302 }
12303 EXPORT_SYMBOL(dump_trace);
12304 diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack.c linux-2.6.32.42/arch/x86/kernel/dumpstack.c
12305 --- linux-2.6.32.42/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12306 +++ linux-2.6.32.42/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12307 @@ -2,6 +2,9 @@
12308 * Copyright (C) 1991, 1992 Linus Torvalds
12309 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12310 */
12311 +#ifdef CONFIG_GRKERNSEC_HIDESYM
12312 +#define __INCLUDED_BY_HIDESYM 1
12313 +#endif
12314 #include <linux/kallsyms.h>
12315 #include <linux/kprobes.h>
12316 #include <linux/uaccess.h>
12317 @@ -28,7 +31,7 @@ static int die_counter;
12318
12319 void printk_address(unsigned long address, int reliable)
12320 {
12321 - printk(" [<%p>] %s%pS\n", (void *) address,
12322 + printk(" [<%p>] %s%pA\n", (void *) address,
12323 reliable ? "" : "? ", (void *) address);
12324 }
12325
12326 @@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12327 static void
12328 print_ftrace_graph_addr(unsigned long addr, void *data,
12329 const struct stacktrace_ops *ops,
12330 - struct thread_info *tinfo, int *graph)
12331 + struct task_struct *task, int *graph)
12332 {
12333 - struct task_struct *task = tinfo->task;
12334 unsigned long ret_addr;
12335 int index = task->curr_ret_stack;
12336
12337 @@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12338 static inline void
12339 print_ftrace_graph_addr(unsigned long addr, void *data,
12340 const struct stacktrace_ops *ops,
12341 - struct thread_info *tinfo, int *graph)
12342 + struct task_struct *task, int *graph)
12343 { }
12344 #endif
12345
12346 @@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12347 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12348 */
12349
12350 -static inline int valid_stack_ptr(struct thread_info *tinfo,
12351 - void *p, unsigned int size, void *end)
12352 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12353 {
12354 - void *t = tinfo;
12355 if (end) {
12356 if (p < end && p >= (end-THREAD_SIZE))
12357 return 1;
12358 @@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12359 }
12360
12361 unsigned long
12362 -print_context_stack(struct thread_info *tinfo,
12363 +print_context_stack(struct task_struct *task, void *stack_start,
12364 unsigned long *stack, unsigned long bp,
12365 const struct stacktrace_ops *ops, void *data,
12366 unsigned long *end, int *graph)
12367 {
12368 struct stack_frame *frame = (struct stack_frame *)bp;
12369
12370 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12371 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12372 unsigned long addr;
12373
12374 addr = *stack;
12375 @@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12376 } else {
12377 ops->address(data, addr, 0);
12378 }
12379 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12380 + print_ftrace_graph_addr(addr, data, ops, task, graph);
12381 }
12382 stack++;
12383 }
12384 @@ -180,7 +180,7 @@ void dump_stack(void)
12385 #endif
12386
12387 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12388 - current->pid, current->comm, print_tainted(),
12389 + task_pid_nr(current), current->comm, print_tainted(),
12390 init_utsname()->release,
12391 (int)strcspn(init_utsname()->version, " "),
12392 init_utsname()->version);
12393 @@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12394 return flags;
12395 }
12396
12397 +extern void gr_handle_kernel_exploit(void);
12398 +
12399 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12400 {
12401 if (regs && kexec_should_crash(current))
12402 @@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12403 panic("Fatal exception in interrupt");
12404 if (panic_on_oops)
12405 panic("Fatal exception");
12406 - do_exit(signr);
12407 +
12408 + gr_handle_kernel_exploit();
12409 +
12410 + do_group_exit(signr);
12411 }
12412
12413 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12414 @@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12415 unsigned long flags = oops_begin();
12416 int sig = SIGSEGV;
12417
12418 - if (!user_mode_vm(regs))
12419 + if (!user_mode(regs))
12420 report_bug(regs->ip, regs);
12421
12422 if (__die(str, regs, err))
12423 diff -urNp linux-2.6.32.42/arch/x86/kernel/dumpstack.h linux-2.6.32.42/arch/x86/kernel/dumpstack.h
12424 --- linux-2.6.32.42/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
12425 +++ linux-2.6.32.42/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
12426 @@ -15,7 +15,7 @@
12427 #endif
12428
12429 extern unsigned long
12430 -print_context_stack(struct thread_info *tinfo,
12431 +print_context_stack(struct task_struct *task, void *stack_start,
12432 unsigned long *stack, unsigned long bp,
12433 const struct stacktrace_ops *ops, void *data,
12434 unsigned long *end, int *graph);
12435 diff -urNp linux-2.6.32.42/arch/x86/kernel/e820.c linux-2.6.32.42/arch/x86/kernel/e820.c
12436 --- linux-2.6.32.42/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
12437 +++ linux-2.6.32.42/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
12438 @@ -733,7 +733,7 @@ struct early_res {
12439 };
12440 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
12441 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
12442 - {}
12443 + { 0, 0, {0}, 0 }
12444 };
12445
12446 static int __init find_overlapped_early(u64 start, u64 end)
12447 diff -urNp linux-2.6.32.42/arch/x86/kernel/early_printk.c linux-2.6.32.42/arch/x86/kernel/early_printk.c
12448 --- linux-2.6.32.42/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
12449 +++ linux-2.6.32.42/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
12450 @@ -7,6 +7,7 @@
12451 #include <linux/pci_regs.h>
12452 #include <linux/pci_ids.h>
12453 #include <linux/errno.h>
12454 +#include <linux/sched.h>
12455 #include <asm/io.h>
12456 #include <asm/processor.h>
12457 #include <asm/fcntl.h>
12458 @@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
12459 int n;
12460 va_list ap;
12461
12462 + pax_track_stack();
12463 +
12464 va_start(ap, fmt);
12465 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12466 early_console->write(early_console, buf, n);
12467 diff -urNp linux-2.6.32.42/arch/x86/kernel/efi_32.c linux-2.6.32.42/arch/x86/kernel/efi_32.c
12468 --- linux-2.6.32.42/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
12469 +++ linux-2.6.32.42/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
12470 @@ -38,70 +38,38 @@
12471 */
12472
12473 static unsigned long efi_rt_eflags;
12474 -static pgd_t efi_bak_pg_dir_pointer[2];
12475 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
12476
12477 -void efi_call_phys_prelog(void)
12478 +void __init efi_call_phys_prelog(void)
12479 {
12480 - unsigned long cr4;
12481 - unsigned long temp;
12482 struct desc_ptr gdt_descr;
12483
12484 local_irq_save(efi_rt_eflags);
12485
12486 - /*
12487 - * If I don't have PAE, I should just duplicate two entries in page
12488 - * directory. If I have PAE, I just need to duplicate one entry in
12489 - * page directory.
12490 - */
12491 - cr4 = read_cr4_safe();
12492
12493 - if (cr4 & X86_CR4_PAE) {
12494 - efi_bak_pg_dir_pointer[0].pgd =
12495 - swapper_pg_dir[pgd_index(0)].pgd;
12496 - swapper_pg_dir[0].pgd =
12497 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12498 - } else {
12499 - efi_bak_pg_dir_pointer[0].pgd =
12500 - swapper_pg_dir[pgd_index(0)].pgd;
12501 - efi_bak_pg_dir_pointer[1].pgd =
12502 - swapper_pg_dir[pgd_index(0x400000)].pgd;
12503 - swapper_pg_dir[pgd_index(0)].pgd =
12504 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12505 - temp = PAGE_OFFSET + 0x400000;
12506 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12507 - swapper_pg_dir[pgd_index(temp)].pgd;
12508 - }
12509 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
12510 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
12511 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
12512
12513 /*
12514 * After the lock is released, the original page table is restored.
12515 */
12516 __flush_tlb_all();
12517
12518 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
12519 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
12520 gdt_descr.size = GDT_SIZE - 1;
12521 load_gdt(&gdt_descr);
12522 }
12523
12524 -void efi_call_phys_epilog(void)
12525 +void __init efi_call_phys_epilog(void)
12526 {
12527 - unsigned long cr4;
12528 struct desc_ptr gdt_descr;
12529
12530 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
12531 + gdt_descr.address = get_cpu_gdt_table(0);
12532 gdt_descr.size = GDT_SIZE - 1;
12533 load_gdt(&gdt_descr);
12534
12535 - cr4 = read_cr4_safe();
12536 -
12537 - if (cr4 & X86_CR4_PAE) {
12538 - swapper_pg_dir[pgd_index(0)].pgd =
12539 - efi_bak_pg_dir_pointer[0].pgd;
12540 - } else {
12541 - swapper_pg_dir[pgd_index(0)].pgd =
12542 - efi_bak_pg_dir_pointer[0].pgd;
12543 - swapper_pg_dir[pgd_index(0x400000)].pgd =
12544 - efi_bak_pg_dir_pointer[1].pgd;
12545 - }
12546 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
12547
12548 /*
12549 * After the lock is released, the original page table is restored.
12550 diff -urNp linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S
12551 --- linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
12552 +++ linux-2.6.32.42/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
12553 @@ -6,6 +6,7 @@
12554 */
12555
12556 #include <linux/linkage.h>
12557 +#include <linux/init.h>
12558 #include <asm/page_types.h>
12559
12560 /*
12561 @@ -20,7 +21,7 @@
12562 * service functions will comply with gcc calling convention, too.
12563 */
12564
12565 -.text
12566 +__INIT
12567 ENTRY(efi_call_phys)
12568 /*
12569 * 0. The function can only be called in Linux kernel. So CS has been
12570 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
12571 * The mapping of lower virtual memory has been created in prelog and
12572 * epilog.
12573 */
12574 - movl $1f, %edx
12575 - subl $__PAGE_OFFSET, %edx
12576 - jmp *%edx
12577 + jmp 1f-__PAGE_OFFSET
12578 1:
12579
12580 /*
12581 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
12582 * parameter 2, ..., param n. To make things easy, we save the return
12583 * address of efi_call_phys in a global variable.
12584 */
12585 - popl %edx
12586 - movl %edx, saved_return_addr
12587 - /* get the function pointer into ECX*/
12588 - popl %ecx
12589 - movl %ecx, efi_rt_function_ptr
12590 - movl $2f, %edx
12591 - subl $__PAGE_OFFSET, %edx
12592 - pushl %edx
12593 + popl (saved_return_addr)
12594 + popl (efi_rt_function_ptr)
12595
12596 /*
12597 * 3. Clear PG bit in %CR0.
12598 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
12599 /*
12600 * 5. Call the physical function.
12601 */
12602 - jmp *%ecx
12603 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
12604
12605 -2:
12606 /*
12607 * 6. After EFI runtime service returns, control will return to
12608 * following instruction. We'd better readjust stack pointer first.
12609 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
12610 movl %cr0, %edx
12611 orl $0x80000000, %edx
12612 movl %edx, %cr0
12613 - jmp 1f
12614 -1:
12615 +
12616 /*
12617 * 8. Now restore the virtual mode from flat mode by
12618 * adding EIP with PAGE_OFFSET.
12619 */
12620 - movl $1f, %edx
12621 - jmp *%edx
12622 + jmp 1f+__PAGE_OFFSET
12623 1:
12624
12625 /*
12626 * 9. Balance the stack. And because EAX contain the return value,
12627 * we'd better not clobber it.
12628 */
12629 - leal efi_rt_function_ptr, %edx
12630 - movl (%edx), %ecx
12631 - pushl %ecx
12632 + pushl (efi_rt_function_ptr)
12633
12634 /*
12635 - * 10. Push the saved return address onto the stack and return.
12636 + * 10. Return to the saved return address.
12637 */
12638 - leal saved_return_addr, %edx
12639 - movl (%edx), %ecx
12640 - pushl %ecx
12641 - ret
12642 + jmpl *(saved_return_addr)
12643 ENDPROC(efi_call_phys)
12644 .previous
12645
12646 -.data
12647 +__INITDATA
12648 saved_return_addr:
12649 .long 0
12650 efi_rt_function_ptr:
12651 diff -urNp linux-2.6.32.42/arch/x86/kernel/entry_32.S linux-2.6.32.42/arch/x86/kernel/entry_32.S
12652 --- linux-2.6.32.42/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
12653 +++ linux-2.6.32.42/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
12654 @@ -185,13 +185,146 @@
12655 /*CFI_REL_OFFSET gs, PT_GS*/
12656 .endm
12657 .macro SET_KERNEL_GS reg
12658 +
12659 +#ifdef CONFIG_CC_STACKPROTECTOR
12660 movl $(__KERNEL_STACK_CANARY), \reg
12661 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12662 + movl $(__USER_DS), \reg
12663 +#else
12664 + xorl \reg, \reg
12665 +#endif
12666 +
12667 movl \reg, %gs
12668 .endm
12669
12670 #endif /* CONFIG_X86_32_LAZY_GS */
12671
12672 -.macro SAVE_ALL
12673 +.macro pax_enter_kernel
12674 +#ifdef CONFIG_PAX_KERNEXEC
12675 + call pax_enter_kernel
12676 +#endif
12677 +.endm
12678 +
12679 +.macro pax_exit_kernel
12680 +#ifdef CONFIG_PAX_KERNEXEC
12681 + call pax_exit_kernel
12682 +#endif
12683 +.endm
12684 +
12685 +#ifdef CONFIG_PAX_KERNEXEC
12686 +ENTRY(pax_enter_kernel)
12687 +#ifdef CONFIG_PARAVIRT
12688 + pushl %eax
12689 + pushl %ecx
12690 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12691 + mov %eax, %esi
12692 +#else
12693 + mov %cr0, %esi
12694 +#endif
12695 + bts $16, %esi
12696 + jnc 1f
12697 + mov %cs, %esi
12698 + cmp $__KERNEL_CS, %esi
12699 + jz 3f
12700 + ljmp $__KERNEL_CS, $3f
12701 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12702 +2:
12703 +#ifdef CONFIG_PARAVIRT
12704 + mov %esi, %eax
12705 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12706 +#else
12707 + mov %esi, %cr0
12708 +#endif
12709 +3:
12710 +#ifdef CONFIG_PARAVIRT
12711 + popl %ecx
12712 + popl %eax
12713 +#endif
12714 + ret
12715 +ENDPROC(pax_enter_kernel)
12716 +
12717 +ENTRY(pax_exit_kernel)
12718 +#ifdef CONFIG_PARAVIRT
12719 + pushl %eax
12720 + pushl %ecx
12721 +#endif
12722 + mov %cs, %esi
12723 + cmp $__KERNEXEC_KERNEL_CS, %esi
12724 + jnz 2f
12725 +#ifdef CONFIG_PARAVIRT
12726 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12727 + mov %eax, %esi
12728 +#else
12729 + mov %cr0, %esi
12730 +#endif
12731 + btr $16, %esi
12732 + ljmp $__KERNEL_CS, $1f
12733 +1:
12734 +#ifdef CONFIG_PARAVIRT
12735 + mov %esi, %eax
12736 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12737 +#else
12738 + mov %esi, %cr0
12739 +#endif
12740 +2:
12741 +#ifdef CONFIG_PARAVIRT
12742 + popl %ecx
12743 + popl %eax
12744 +#endif
12745 + ret
12746 +ENDPROC(pax_exit_kernel)
12747 +#endif
12748 +
12749 +.macro pax_erase_kstack
12750 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12751 + call pax_erase_kstack
12752 +#endif
12753 +.endm
12754 +
12755 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12756 +/*
12757 + * ebp: thread_info
12758 + * ecx, edx: can be clobbered
12759 + */
12760 +ENTRY(pax_erase_kstack)
12761 + pushl %edi
12762 + pushl %eax
12763 +
12764 + mov TI_lowest_stack(%ebp), %edi
12765 + mov $-0xBEEF, %eax
12766 + std
12767 +
12768 +1: mov %edi, %ecx
12769 + and $THREAD_SIZE_asm - 1, %ecx
12770 + shr $2, %ecx
12771 + repne scasl
12772 + jecxz 2f
12773 +
12774 + cmp $2*16, %ecx
12775 + jc 2f
12776 +
12777 + mov $2*16, %ecx
12778 + repe scasl
12779 + jecxz 2f
12780 + jne 1b
12781 +
12782 +2: cld
12783 + mov %esp, %ecx
12784 + sub %edi, %ecx
12785 + shr $2, %ecx
12786 + rep stosl
12787 +
12788 + mov TI_task_thread_sp0(%ebp), %edi
12789 + sub $128, %edi
12790 + mov %edi, TI_lowest_stack(%ebp)
12791 +
12792 + popl %eax
12793 + popl %edi
12794 + ret
12795 +ENDPROC(pax_erase_kstack)
12796 +#endif
12797 +
12798 +.macro __SAVE_ALL _DS
12799 cld
12800 PUSH_GS
12801 pushl %fs
12802 @@ -224,7 +357,7 @@
12803 pushl %ebx
12804 CFI_ADJUST_CFA_OFFSET 4
12805 CFI_REL_OFFSET ebx, 0
12806 - movl $(__USER_DS), %edx
12807 + movl $\_DS, %edx
12808 movl %edx, %ds
12809 movl %edx, %es
12810 movl $(__KERNEL_PERCPU), %edx
12811 @@ -232,6 +365,15 @@
12812 SET_KERNEL_GS %edx
12813 .endm
12814
12815 +.macro SAVE_ALL
12816 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
12817 + __SAVE_ALL __KERNEL_DS
12818 + pax_enter_kernel
12819 +#else
12820 + __SAVE_ALL __USER_DS
12821 +#endif
12822 +.endm
12823 +
12824 .macro RESTORE_INT_REGS
12825 popl %ebx
12826 CFI_ADJUST_CFA_OFFSET -4
12827 @@ -352,7 +494,15 @@ check_userspace:
12828 movb PT_CS(%esp), %al
12829 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
12830 cmpl $USER_RPL, %eax
12831 +
12832 +#ifdef CONFIG_PAX_KERNEXEC
12833 + jae resume_userspace
12834 +
12835 + PAX_EXIT_KERNEL
12836 + jmp resume_kernel
12837 +#else
12838 jb resume_kernel # not returning to v8086 or userspace
12839 +#endif
12840
12841 ENTRY(resume_userspace)
12842 LOCKDEP_SYS_EXIT
12843 @@ -364,7 +514,7 @@ ENTRY(resume_userspace)
12844 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
12845 # int/exception return?
12846 jne work_pending
12847 - jmp restore_all
12848 + jmp restore_all_pax
12849 END(ret_from_exception)
12850
12851 #ifdef CONFIG_PREEMPT
12852 @@ -414,25 +564,36 @@ sysenter_past_esp:
12853 /*CFI_REL_OFFSET cs, 0*/
12854 /*
12855 * Push current_thread_info()->sysenter_return to the stack.
12856 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
12857 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
12858 */
12859 - pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
12860 + pushl $0
12861 CFI_ADJUST_CFA_OFFSET 4
12862 CFI_REL_OFFSET eip, 0
12863
12864 pushl %eax
12865 CFI_ADJUST_CFA_OFFSET 4
12866 SAVE_ALL
12867 + GET_THREAD_INFO(%ebp)
12868 + movl TI_sysenter_return(%ebp),%ebp
12869 + movl %ebp,PT_EIP(%esp)
12870 ENABLE_INTERRUPTS(CLBR_NONE)
12871
12872 /*
12873 * Load the potential sixth argument from user stack.
12874 * Careful about security.
12875 */
12876 + movl PT_OLDESP(%esp),%ebp
12877 +
12878 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12879 + mov PT_OLDSS(%esp),%ds
12880 +1: movl %ds:(%ebp),%ebp
12881 + push %ss
12882 + pop %ds
12883 +#else
12884 cmpl $__PAGE_OFFSET-3,%ebp
12885 jae syscall_fault
12886 1: movl (%ebp),%ebp
12887 +#endif
12888 +
12889 movl %ebp,PT_EBP(%esp)
12890 .section __ex_table,"a"
12891 .align 4
12892 @@ -455,12 +616,23 @@ sysenter_do_call:
12893 testl $_TIF_ALLWORK_MASK, %ecx
12894 jne sysexit_audit
12895 sysenter_exit:
12896 +
12897 +#ifdef CONFIG_PAX_RANDKSTACK
12898 + pushl_cfi %eax
12899 + call pax_randomize_kstack
12900 + popl_cfi %eax
12901 +#endif
12902 +
12903 + pax_erase_kstack
12904 +
12905 /* if something modifies registers it must also disable sysexit */
12906 movl PT_EIP(%esp), %edx
12907 movl PT_OLDESP(%esp), %ecx
12908 xorl %ebp,%ebp
12909 TRACE_IRQS_ON
12910 1: mov PT_FS(%esp), %fs
12911 +2: mov PT_DS(%esp), %ds
12912 +3: mov PT_ES(%esp), %es
12913 PTGS_TO_GS
12914 ENABLE_INTERRUPTS_SYSEXIT
12915
12916 @@ -477,6 +649,9 @@ sysenter_audit:
12917 movl %eax,%edx /* 2nd arg: syscall number */
12918 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
12919 call audit_syscall_entry
12920 +
12921 + pax_erase_kstack
12922 +
12923 pushl %ebx
12924 CFI_ADJUST_CFA_OFFSET 4
12925 movl PT_EAX(%esp),%eax /* reload syscall number */
12926 @@ -504,11 +679,17 @@ sysexit_audit:
12927
12928 CFI_ENDPROC
12929 .pushsection .fixup,"ax"
12930 -2: movl $0,PT_FS(%esp)
12931 +4: movl $0,PT_FS(%esp)
12932 + jmp 1b
12933 +5: movl $0,PT_DS(%esp)
12934 + jmp 1b
12935 +6: movl $0,PT_ES(%esp)
12936 jmp 1b
12937 .section __ex_table,"a"
12938 .align 4
12939 - .long 1b,2b
12940 + .long 1b,4b
12941 + .long 2b,5b
12942 + .long 3b,6b
12943 .popsection
12944 PTGS_TO_GS_EX
12945 ENDPROC(ia32_sysenter_target)
12946 @@ -538,6 +719,14 @@ syscall_exit:
12947 testl $_TIF_ALLWORK_MASK, %ecx # current->work
12948 jne syscall_exit_work
12949
12950 +restore_all_pax:
12951 +
12952 +#ifdef CONFIG_PAX_RANDKSTACK
12953 + call pax_randomize_kstack
12954 +#endif
12955 +
12956 + pax_erase_kstack
12957 +
12958 restore_all:
12959 TRACE_IRQS_IRET
12960 restore_all_notrace:
12961 @@ -602,7 +791,13 @@ ldt_ss:
12962 mov PT_OLDESP(%esp), %eax /* load userspace esp */
12963 mov %dx, %ax /* eax: new kernel esp */
12964 sub %eax, %edx /* offset (low word is 0) */
12965 - PER_CPU(gdt_page, %ebx)
12966 +#ifdef CONFIG_SMP
12967 + movl PER_CPU_VAR(cpu_number), %ebx
12968 + shll $PAGE_SHIFT_asm, %ebx
12969 + addl $cpu_gdt_table, %ebx
12970 +#else
12971 + movl $cpu_gdt_table, %ebx
12972 +#endif
12973 shr $16, %edx
12974 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
12975 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
12976 @@ -636,31 +831,25 @@ work_resched:
12977 movl TI_flags(%ebp), %ecx
12978 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
12979 # than syscall tracing?
12980 - jz restore_all
12981 + jz restore_all_pax
12982 testb $_TIF_NEED_RESCHED, %cl
12983 jnz work_resched
12984
12985 work_notifysig: # deal with pending signals and
12986 # notify-resume requests
12987 + movl %esp, %eax
12988 #ifdef CONFIG_VM86
12989 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
12990 - movl %esp, %eax
12991 - jne work_notifysig_v86 # returning to kernel-space or
12992 + jz 1f # returning to kernel-space or
12993 # vm86-space
12994 - xorl %edx, %edx
12995 - call do_notify_resume
12996 - jmp resume_userspace_sig
12997
12998 - ALIGN
12999 -work_notifysig_v86:
13000 pushl %ecx # save ti_flags for do_notify_resume
13001 CFI_ADJUST_CFA_OFFSET 4
13002 call save_v86_state # %eax contains pt_regs pointer
13003 popl %ecx
13004 CFI_ADJUST_CFA_OFFSET -4
13005 movl %eax, %esp
13006 -#else
13007 - movl %esp, %eax
13008 +1:
13009 #endif
13010 xorl %edx, %edx
13011 call do_notify_resume
13012 @@ -673,6 +862,9 @@ syscall_trace_entry:
13013 movl $-ENOSYS,PT_EAX(%esp)
13014 movl %esp, %eax
13015 call syscall_trace_enter
13016 +
13017 + pax_erase_kstack
13018 +
13019 /* What it returned is what we'll actually use. */
13020 cmpl $(nr_syscalls), %eax
13021 jnae syscall_call
13022 @@ -695,6 +887,10 @@ END(syscall_exit_work)
13023
13024 RING0_INT_FRAME # can't unwind into user space anyway
13025 syscall_fault:
13026 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13027 + push %ss
13028 + pop %ds
13029 +#endif
13030 GET_THREAD_INFO(%ebp)
13031 movl $-EFAULT,PT_EAX(%esp)
13032 jmp resume_userspace
13033 @@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
13034 PTREGSCALL(vm86)
13035 PTREGSCALL(vm86old)
13036
13037 + ALIGN;
13038 +ENTRY(kernel_execve)
13039 + push %ebp
13040 + sub $PT_OLDSS+4,%esp
13041 + push %edi
13042 + push %ecx
13043 + push %eax
13044 + lea 3*4(%esp),%edi
13045 + mov $PT_OLDSS/4+1,%ecx
13046 + xorl %eax,%eax
13047 + rep stosl
13048 + pop %eax
13049 + pop %ecx
13050 + pop %edi
13051 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13052 + mov %eax,PT_EBX(%esp)
13053 + mov %edx,PT_ECX(%esp)
13054 + mov %ecx,PT_EDX(%esp)
13055 + mov %esp,%eax
13056 + call sys_execve
13057 + GET_THREAD_INFO(%ebp)
13058 + test %eax,%eax
13059 + jz syscall_exit
13060 + add $PT_OLDSS+4,%esp
13061 + pop %ebp
13062 + ret
13063 +
13064 .macro FIXUP_ESPFIX_STACK
13065 /*
13066 * Switch back for ESPFIX stack to the normal zerobased stack
13067 @@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
13068 * normal stack and adjusts ESP with the matching offset.
13069 */
13070 /* fixup the stack */
13071 - PER_CPU(gdt_page, %ebx)
13072 +#ifdef CONFIG_SMP
13073 + movl PER_CPU_VAR(cpu_number), %ebx
13074 + shll $PAGE_SHIFT_asm, %ebx
13075 + addl $cpu_gdt_table, %ebx
13076 +#else
13077 + movl $cpu_gdt_table, %ebx
13078 +#endif
13079 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13080 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13081 shl $16, %eax
13082 @@ -1198,7 +1427,6 @@ return_to_handler:
13083 ret
13084 #endif
13085
13086 -.section .rodata,"a"
13087 #include "syscall_table_32.S"
13088
13089 syscall_table_size=(.-sys_call_table)
13090 @@ -1255,9 +1483,12 @@ error_code:
13091 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13092 REG_TO_PTGS %ecx
13093 SET_KERNEL_GS %ecx
13094 - movl $(__USER_DS), %ecx
13095 + movl $(__KERNEL_DS), %ecx
13096 movl %ecx, %ds
13097 movl %ecx, %es
13098 +
13099 + pax_enter_kernel
13100 +
13101 TRACE_IRQS_OFF
13102 movl %esp,%eax # pt_regs pointer
13103 call *%edi
13104 @@ -1351,6 +1582,9 @@ nmi_stack_correct:
13105 xorl %edx,%edx # zero error code
13106 movl %esp,%eax # pt_regs pointer
13107 call do_nmi
13108 +
13109 + pax_exit_kernel
13110 +
13111 jmp restore_all_notrace
13112 CFI_ENDPROC
13113
13114 @@ -1391,6 +1625,9 @@ nmi_espfix_stack:
13115 FIXUP_ESPFIX_STACK # %eax == %esp
13116 xorl %edx,%edx # zero error code
13117 call do_nmi
13118 +
13119 + pax_exit_kernel
13120 +
13121 RESTORE_REGS
13122 lss 12+4(%esp), %esp # back to espfix stack
13123 CFI_ADJUST_CFA_OFFSET -24
13124 diff -urNp linux-2.6.32.42/arch/x86/kernel/entry_64.S linux-2.6.32.42/arch/x86/kernel/entry_64.S
13125 --- linux-2.6.32.42/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13126 +++ linux-2.6.32.42/arch/x86/kernel/entry_64.S 2011-06-04 20:30:53.000000000 -0400
13127 @@ -53,6 +53,7 @@
13128 #include <asm/paravirt.h>
13129 #include <asm/ftrace.h>
13130 #include <asm/percpu.h>
13131 +#include <asm/pgtable.h>
13132
13133 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13134 #include <linux/elf-em.h>
13135 @@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13136 ENDPROC(native_usergs_sysret64)
13137 #endif /* CONFIG_PARAVIRT */
13138
13139 + .macro ljmpq sel, off
13140 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13141 + .byte 0x48; ljmp *1234f(%rip)
13142 + .pushsection .rodata
13143 + .align 16
13144 + 1234: .quad \off; .word \sel
13145 + .popsection
13146 +#else
13147 + pushq $\sel
13148 + pushq $\off
13149 + lretq
13150 +#endif
13151 + .endm
13152 +
13153 + .macro pax_enter_kernel
13154 +#ifdef CONFIG_PAX_KERNEXEC
13155 + call pax_enter_kernel
13156 +#endif
13157 + .endm
13158 +
13159 + .macro pax_exit_kernel
13160 +#ifdef CONFIG_PAX_KERNEXEC
13161 + call pax_exit_kernel
13162 +#endif
13163 + .endm
13164 +
13165 +#ifdef CONFIG_PAX_KERNEXEC
13166 +ENTRY(pax_enter_kernel)
13167 + pushq %rdi
13168 +
13169 +#ifdef CONFIG_PARAVIRT
13170 + PV_SAVE_REGS(CLBR_RDI)
13171 +#endif
13172 +
13173 + GET_CR0_INTO_RDI
13174 + bts $16,%rdi
13175 + jnc 1f
13176 + mov %cs,%edi
13177 + cmp $__KERNEL_CS,%edi
13178 + jz 3f
13179 + ljmpq __KERNEL_CS,3f
13180 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
13181 +2: SET_RDI_INTO_CR0
13182 +3:
13183 +
13184 +#ifdef CONFIG_PARAVIRT
13185 + PV_RESTORE_REGS(CLBR_RDI)
13186 +#endif
13187 +
13188 + popq %rdi
13189 + retq
13190 +ENDPROC(pax_enter_kernel)
13191 +
13192 +ENTRY(pax_exit_kernel)
13193 + pushq %rdi
13194 +
13195 +#ifdef CONFIG_PARAVIRT
13196 + PV_SAVE_REGS(CLBR_RDI)
13197 +#endif
13198 +
13199 + mov %cs,%rdi
13200 + cmp $__KERNEXEC_KERNEL_CS,%edi
13201 + jnz 2f
13202 + GET_CR0_INTO_RDI
13203 + btr $16,%rdi
13204 + ljmpq __KERNEL_CS,1f
13205 +1: SET_RDI_INTO_CR0
13206 +2:
13207 +
13208 +#ifdef CONFIG_PARAVIRT
13209 + PV_RESTORE_REGS(CLBR_RDI);
13210 +#endif
13211 +
13212 + popq %rdi
13213 + retq
13214 +ENDPROC(pax_exit_kernel)
13215 +#endif
13216 +
13217 + .macro pax_enter_kernel_user
13218 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13219 + call pax_enter_kernel_user
13220 +#endif
13221 + .endm
13222 +
13223 + .macro pax_exit_kernel_user
13224 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13225 + call pax_exit_kernel_user
13226 +#endif
13227 +#ifdef CONFIG_PAX_RANDKSTACK
13228 + push %rax
13229 + call pax_randomize_kstack
13230 + pop %rax
13231 +#endif
13232 + pax_erase_kstack
13233 + .endm
13234 +
13235 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13236 +ENTRY(pax_enter_kernel_user)
13237 + pushq %rdi
13238 + pushq %rbx
13239 +
13240 +#ifdef CONFIG_PARAVIRT
13241 + PV_SAVE_REGS(CLBR_RDI)
13242 +#endif
13243 +
13244 + GET_CR3_INTO_RDI
13245 + mov %rdi,%rbx
13246 + add $__START_KERNEL_map,%rbx
13247 + sub phys_base(%rip),%rbx
13248 +
13249 +#ifdef CONFIG_PARAVIRT
13250 + pushq %rdi
13251 + cmpl $0, pv_info+PARAVIRT_enabled
13252 + jz 1f
13253 + i = 0
13254 + .rept USER_PGD_PTRS
13255 + mov i*8(%rbx),%rsi
13256 + mov $0,%sil
13257 + lea i*8(%rbx),%rdi
13258 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13259 + i = i + 1
13260 + .endr
13261 + jmp 2f
13262 +1:
13263 +#endif
13264 +
13265 + i = 0
13266 + .rept USER_PGD_PTRS
13267 + movb $0,i*8(%rbx)
13268 + i = i + 1
13269 + .endr
13270 +
13271 +#ifdef CONFIG_PARAVIRT
13272 +2: popq %rdi
13273 +#endif
13274 + SET_RDI_INTO_CR3
13275 +
13276 +#ifdef CONFIG_PAX_KERNEXEC
13277 + GET_CR0_INTO_RDI
13278 + bts $16,%rdi
13279 + SET_RDI_INTO_CR0
13280 +#endif
13281 +
13282 +#ifdef CONFIG_PARAVIRT
13283 + PV_RESTORE_REGS(CLBR_RDI)
13284 +#endif
13285 +
13286 + popq %rbx
13287 + popq %rdi
13288 + retq
13289 +ENDPROC(pax_enter_kernel_user)
13290 +
13291 +ENTRY(pax_exit_kernel_user)
13292 + push %rdi
13293 +
13294 +#ifdef CONFIG_PARAVIRT
13295 + pushq %rbx
13296 + PV_SAVE_REGS(CLBR_RDI)
13297 +#endif
13298 +
13299 +#ifdef CONFIG_PAX_KERNEXEC
13300 + GET_CR0_INTO_RDI
13301 + btr $16,%rdi
13302 + SET_RDI_INTO_CR0
13303 +#endif
13304 +
13305 + GET_CR3_INTO_RDI
13306 + add $__START_KERNEL_map,%rdi
13307 + sub phys_base(%rip),%rdi
13308 +
13309 +#ifdef CONFIG_PARAVIRT
13310 + cmpl $0, pv_info+PARAVIRT_enabled
13311 + jz 1f
13312 + mov %rdi,%rbx
13313 + i = 0
13314 + .rept USER_PGD_PTRS
13315 + mov i*8(%rbx),%rsi
13316 + mov $0x67,%sil
13317 + lea i*8(%rbx),%rdi
13318 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13319 + i = i + 1
13320 + .endr
13321 + jmp 2f
13322 +1:
13323 +#endif
13324 +
13325 + i = 0
13326 + .rept USER_PGD_PTRS
13327 + movb $0x67,i*8(%rdi)
13328 + i = i + 1
13329 + .endr
13330 +
13331 +#ifdef CONFIG_PARAVIRT
13332 +2: PV_RESTORE_REGS(CLBR_RDI)
13333 + popq %rbx
13334 +#endif
13335 +
13336 + popq %rdi
13337 + retq
13338 +ENDPROC(pax_exit_kernel_user)
13339 +#endif
13340 +
13341 +.macro pax_erase_kstack
13342 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13343 + call pax_erase_kstack
13344 +#endif
13345 +.endm
13346 +
13347 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13348 +/*
13349 + * r10: thread_info
13350 + * rcx, rdx: can be clobbered
13351 + */
13352 +ENTRY(pax_erase_kstack)
13353 + pushq %rdi
13354 + pushq %rax
13355 +
13356 + GET_THREAD_INFO(%r10)
13357 + mov TI_lowest_stack(%r10), %rdi
13358 + mov $-0xBEEF, %rax
13359 + std
13360 +
13361 +1: mov %edi, %ecx
13362 + and $THREAD_SIZE_asm - 1, %ecx
13363 + shr $3, %ecx
13364 + repne scasq
13365 + jecxz 2f
13366 +
13367 + cmp $2*8, %ecx
13368 + jc 2f
13369 +
13370 + mov $2*8, %ecx
13371 + repe scasq
13372 + jecxz 2f
13373 + jne 1b
13374 +
13375 +2: cld
13376 + mov %esp, %ecx
13377 + sub %edi, %ecx
13378 + shr $3, %ecx
13379 + rep stosq
13380 +
13381 + mov TI_task_thread_sp0(%r10), %rdi
13382 + sub $256, %rdi
13383 + mov %rdi, TI_lowest_stack(%r10)
13384 +
13385 + popq %rax
13386 + popq %rdi
13387 + ret
13388 +ENDPROC(pax_erase_kstack)
13389 +#endif
13390
13391 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13392 #ifdef CONFIG_TRACE_IRQFLAGS
13393 @@ -317,7 +569,7 @@ ENTRY(save_args)
13394 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13395 movq_cfi rbp, 8 /* push %rbp */
13396 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13397 - testl $3, CS(%rdi)
13398 + testb $3, CS(%rdi)
13399 je 1f
13400 SWAPGS
13401 /*
13402 @@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
13403
13404 RESTORE_REST
13405
13406 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13407 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13408 je int_ret_from_sys_call
13409
13410 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13411 @@ -455,7 +707,7 @@ END(ret_from_fork)
13412 ENTRY(system_call)
13413 CFI_STARTPROC simple
13414 CFI_SIGNAL_FRAME
13415 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13416 + CFI_DEF_CFA rsp,0
13417 CFI_REGISTER rip,rcx
13418 /*CFI_REGISTER rflags,r11*/
13419 SWAPGS_UNSAFE_STACK
13420 @@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
13421
13422 movq %rsp,PER_CPU_VAR(old_rsp)
13423 movq PER_CPU_VAR(kernel_stack),%rsp
13424 + pax_enter_kernel_user
13425 /*
13426 * No need to follow this irqs off/on section - it's straight
13427 * and short:
13428 */
13429 ENABLE_INTERRUPTS(CLBR_NONE)
13430 - SAVE_ARGS 8,1
13431 + SAVE_ARGS 8*6,1
13432 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13433 movq %rcx,RIP-ARGOFFSET(%rsp)
13434 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13435 @@ -502,6 +755,7 @@ sysret_check:
13436 andl %edi,%edx
13437 jnz sysret_careful
13438 CFI_REMEMBER_STATE
13439 + pax_exit_kernel_user
13440 /*
13441 * sysretq will re-enable interrupts:
13442 */
13443 @@ -562,6 +816,9 @@ auditsys:
13444 movq %rax,%rsi /* 2nd arg: syscall number */
13445 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13446 call audit_syscall_entry
13447 +
13448 + pax_erase_kstack
13449 +
13450 LOAD_ARGS 0 /* reload call-clobbered registers */
13451 jmp system_call_fastpath
13452
13453 @@ -592,6 +849,9 @@ tracesys:
13454 FIXUP_TOP_OF_STACK %rdi
13455 movq %rsp,%rdi
13456 call syscall_trace_enter
13457 +
13458 + pax_erase_kstack
13459 +
13460 /*
13461 * Reload arg registers from stack in case ptrace changed them.
13462 * We don't reload %rax because syscall_trace_enter() returned
13463 @@ -613,7 +873,7 @@ tracesys:
13464 GLOBAL(int_ret_from_sys_call)
13465 DISABLE_INTERRUPTS(CLBR_NONE)
13466 TRACE_IRQS_OFF
13467 - testl $3,CS-ARGOFFSET(%rsp)
13468 + testb $3,CS-ARGOFFSET(%rsp)
13469 je retint_restore_args
13470 movl $_TIF_ALLWORK_MASK,%edi
13471 /* edi: mask to check */
13472 @@ -800,6 +1060,16 @@ END(interrupt)
13473 CFI_ADJUST_CFA_OFFSET 10*8
13474 call save_args
13475 PARTIAL_FRAME 0
13476 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13477 + testb $3, CS(%rdi)
13478 + jnz 1f
13479 + pax_enter_kernel
13480 + jmp 2f
13481 +1: pax_enter_kernel_user
13482 +2:
13483 +#else
13484 + pax_enter_kernel
13485 +#endif
13486 call \func
13487 .endm
13488
13489 @@ -822,7 +1092,7 @@ ret_from_intr:
13490 CFI_ADJUST_CFA_OFFSET -8
13491 exit_intr:
13492 GET_THREAD_INFO(%rcx)
13493 - testl $3,CS-ARGOFFSET(%rsp)
13494 + testb $3,CS-ARGOFFSET(%rsp)
13495 je retint_kernel
13496
13497 /* Interrupt came from user space */
13498 @@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
13499 * The iretq could re-enable interrupts:
13500 */
13501 DISABLE_INTERRUPTS(CLBR_ANY)
13502 + pax_exit_kernel_user
13503 TRACE_IRQS_IRETQ
13504 SWAPGS
13505 jmp restore_args
13506
13507 retint_restore_args: /* return to kernel space */
13508 DISABLE_INTERRUPTS(CLBR_ANY)
13509 + pax_exit_kernel
13510 /*
13511 * The iretq could re-enable interrupts:
13512 */
13513 @@ -1032,6 +1304,16 @@ ENTRY(\sym)
13514 CFI_ADJUST_CFA_OFFSET 15*8
13515 call error_entry
13516 DEFAULT_FRAME 0
13517 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13518 + testb $3, CS(%rsp)
13519 + jnz 1f
13520 + pax_enter_kernel
13521 + jmp 2f
13522 +1: pax_enter_kernel_user
13523 +2:
13524 +#else
13525 + pax_enter_kernel
13526 +#endif
13527 movq %rsp,%rdi /* pt_regs pointer */
13528 xorl %esi,%esi /* no error code */
13529 call \do_sym
13530 @@ -1049,6 +1331,16 @@ ENTRY(\sym)
13531 subq $15*8, %rsp
13532 call save_paranoid
13533 TRACE_IRQS_OFF
13534 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13535 + testb $3, CS(%rsp)
13536 + jnz 1f
13537 + pax_enter_kernel
13538 + jmp 2f
13539 +1: pax_enter_kernel_user
13540 +2:
13541 +#else
13542 + pax_enter_kernel
13543 +#endif
13544 movq %rsp,%rdi /* pt_regs pointer */
13545 xorl %esi,%esi /* no error code */
13546 call \do_sym
13547 @@ -1066,9 +1358,24 @@ ENTRY(\sym)
13548 subq $15*8, %rsp
13549 call save_paranoid
13550 TRACE_IRQS_OFF
13551 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13552 + testb $3, CS(%rsp)
13553 + jnz 1f
13554 + pax_enter_kernel
13555 + jmp 2f
13556 +1: pax_enter_kernel_user
13557 +2:
13558 +#else
13559 + pax_enter_kernel
13560 +#endif
13561 movq %rsp,%rdi /* pt_regs pointer */
13562 xorl %esi,%esi /* no error code */
13563 - PER_CPU(init_tss, %rbp)
13564 +#ifdef CONFIG_SMP
13565 + imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
13566 + lea init_tss(%rbp), %rbp
13567 +#else
13568 + lea init_tss(%rip), %rbp
13569 +#endif
13570 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13571 call \do_sym
13572 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13573 @@ -1085,6 +1392,16 @@ ENTRY(\sym)
13574 CFI_ADJUST_CFA_OFFSET 15*8
13575 call error_entry
13576 DEFAULT_FRAME 0
13577 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13578 + testb $3, CS(%rsp)
13579 + jnz 1f
13580 + pax_enter_kernel
13581 + jmp 2f
13582 +1: pax_enter_kernel_user
13583 +2:
13584 +#else
13585 + pax_enter_kernel
13586 +#endif
13587 movq %rsp,%rdi /* pt_regs pointer */
13588 movq ORIG_RAX(%rsp),%rsi /* get error code */
13589 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13590 @@ -1104,6 +1421,16 @@ ENTRY(\sym)
13591 call save_paranoid
13592 DEFAULT_FRAME 0
13593 TRACE_IRQS_OFF
13594 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13595 + testb $3, CS(%rsp)
13596 + jnz 1f
13597 + pax_enter_kernel
13598 + jmp 2f
13599 +1: pax_enter_kernel_user
13600 +2:
13601 +#else
13602 + pax_enter_kernel
13603 +#endif
13604 movq %rsp,%rdi /* pt_regs pointer */
13605 movq ORIG_RAX(%rsp),%rsi /* get error code */
13606 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13607 @@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
13608 TRACE_IRQS_OFF
13609 testl %ebx,%ebx /* swapgs needed? */
13610 jnz paranoid_restore
13611 - testl $3,CS(%rsp)
13612 + testb $3,CS(%rsp)
13613 jnz paranoid_userspace
13614 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13615 + pax_exit_kernel
13616 + TRACE_IRQS_IRETQ 0
13617 + SWAPGS_UNSAFE_STACK
13618 + RESTORE_ALL 8
13619 + jmp irq_return
13620 +#endif
13621 paranoid_swapgs:
13622 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13623 + pax_exit_kernel_user
13624 +#else
13625 + pax_exit_kernel
13626 +#endif
13627 TRACE_IRQS_IRETQ 0
13628 SWAPGS_UNSAFE_STACK
13629 RESTORE_ALL 8
13630 jmp irq_return
13631 paranoid_restore:
13632 + pax_exit_kernel
13633 TRACE_IRQS_IRETQ 0
13634 RESTORE_ALL 8
13635 jmp irq_return
13636 @@ -1470,7 +1810,7 @@ ENTRY(error_entry)
13637 movq_cfi r14, R14+8
13638 movq_cfi r15, R15+8
13639 xorl %ebx,%ebx
13640 - testl $3,CS+8(%rsp)
13641 + testb $3,CS+8(%rsp)
13642 je error_kernelspace
13643 error_swapgs:
13644 SWAPGS
13645 @@ -1529,6 +1869,16 @@ ENTRY(nmi)
13646 CFI_ADJUST_CFA_OFFSET 15*8
13647 call save_paranoid
13648 DEFAULT_FRAME 0
13649 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13650 + testb $3, CS(%rsp)
13651 + jnz 1f
13652 + pax_enter_kernel
13653 + jmp 2f
13654 +1: pax_enter_kernel_user
13655 +2:
13656 +#else
13657 + pax_enter_kernel
13658 +#endif
13659 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13660 movq %rsp,%rdi
13661 movq $-1,%rsi
13662 @@ -1539,11 +1889,25 @@ ENTRY(nmi)
13663 DISABLE_INTERRUPTS(CLBR_NONE)
13664 testl %ebx,%ebx /* swapgs needed? */
13665 jnz nmi_restore
13666 - testl $3,CS(%rsp)
13667 + testb $3,CS(%rsp)
13668 jnz nmi_userspace
13669 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13670 + pax_exit_kernel
13671 + SWAPGS_UNSAFE_STACK
13672 + RESTORE_ALL 8
13673 + jmp irq_return
13674 +#endif
13675 nmi_swapgs:
13676 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13677 + pax_exit_kernel_user
13678 +#else
13679 + pax_exit_kernel
13680 +#endif
13681 SWAPGS_UNSAFE_STACK
13682 + RESTORE_ALL 8
13683 + jmp irq_return
13684 nmi_restore:
13685 + pax_exit_kernel
13686 RESTORE_ALL 8
13687 jmp irq_return
13688 nmi_userspace:
13689 diff -urNp linux-2.6.32.42/arch/x86/kernel/ftrace.c linux-2.6.32.42/arch/x86/kernel/ftrace.c
13690 --- linux-2.6.32.42/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
13691 +++ linux-2.6.32.42/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
13692 @@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
13693 static void *mod_code_newcode; /* holds the text to write to the IP */
13694
13695 static unsigned nmi_wait_count;
13696 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
13697 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13698
13699 int ftrace_arch_read_dyn_info(char *buf, int size)
13700 {
13701 @@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13702
13703 r = snprintf(buf, size, "%u %u",
13704 nmi_wait_count,
13705 - atomic_read(&nmi_update_count));
13706 + atomic_read_unchecked(&nmi_update_count));
13707 return r;
13708 }
13709
13710 @@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
13711 {
13712 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13713 smp_rmb();
13714 + pax_open_kernel();
13715 ftrace_mod_code();
13716 - atomic_inc(&nmi_update_count);
13717 + pax_close_kernel();
13718 + atomic_inc_unchecked(&nmi_update_count);
13719 }
13720 /* Must have previous changes seen before executions */
13721 smp_mb();
13722 @@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
13723
13724
13725
13726 -static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
13727 +static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
13728
13729 static unsigned char *ftrace_nop_replace(void)
13730 {
13731 @@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
13732 {
13733 unsigned char replaced[MCOUNT_INSN_SIZE];
13734
13735 + ip = ktla_ktva(ip);
13736 +
13737 /*
13738 * Note: Due to modules and __init, code can
13739 * disappear and change, we need to protect against faulting
13740 @@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13741 unsigned char old[MCOUNT_INSN_SIZE], *new;
13742 int ret;
13743
13744 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13745 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13746 new = ftrace_call_replace(ip, (unsigned long)func);
13747 ret = ftrace_modify_code(ip, old, new);
13748
13749 @@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
13750 switch (faulted) {
13751 case 0:
13752 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
13753 - memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
13754 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
13755 break;
13756 case 1:
13757 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
13758 - memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
13759 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
13760 break;
13761 case 2:
13762 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
13763 - memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
13764 + memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
13765 break;
13766 }
13767
13768 @@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
13769 {
13770 unsigned char code[MCOUNT_INSN_SIZE];
13771
13772 + ip = ktla_ktva(ip);
13773 +
13774 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
13775 return -EFAULT;
13776
13777 diff -urNp linux-2.6.32.42/arch/x86/kernel/head32.c linux-2.6.32.42/arch/x86/kernel/head32.c
13778 --- linux-2.6.32.42/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
13779 +++ linux-2.6.32.42/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
13780 @@ -16,6 +16,7 @@
13781 #include <asm/apic.h>
13782 #include <asm/io_apic.h>
13783 #include <asm/bios_ebda.h>
13784 +#include <asm/boot.h>
13785
13786 static void __init i386_default_early_setup(void)
13787 {
13788 @@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
13789 {
13790 reserve_trampoline_memory();
13791
13792 - reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13793 + reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
13794
13795 #ifdef CONFIG_BLK_DEV_INITRD
13796 /* Reserve INITRD */
13797 diff -urNp linux-2.6.32.42/arch/x86/kernel/head_32.S linux-2.6.32.42/arch/x86/kernel/head_32.S
13798 --- linux-2.6.32.42/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
13799 +++ linux-2.6.32.42/arch/x86/kernel/head_32.S 2011-04-17 15:56:46.000000000 -0400
13800 @@ -19,10 +19,17 @@
13801 #include <asm/setup.h>
13802 #include <asm/processor-flags.h>
13803 #include <asm/percpu.h>
13804 +#include <asm/msr-index.h>
13805
13806 /* Physical address */
13807 #define pa(X) ((X) - __PAGE_OFFSET)
13808
13809 +#ifdef CONFIG_PAX_KERNEXEC
13810 +#define ta(X) (X)
13811 +#else
13812 +#define ta(X) ((X) - __PAGE_OFFSET)
13813 +#endif
13814 +
13815 /*
13816 * References to members of the new_cpu_data structure.
13817 */
13818 @@ -52,11 +59,7 @@
13819 * and small than max_low_pfn, otherwise will waste some page table entries
13820 */
13821
13822 -#if PTRS_PER_PMD > 1
13823 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
13824 -#else
13825 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
13826 -#endif
13827 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
13828
13829 /* Enough space to fit pagetables for the low memory linear map */
13830 MAPPING_BEYOND_END = \
13831 @@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
13832 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13833
13834 /*
13835 + * Real beginning of normal "text" segment
13836 + */
13837 +ENTRY(stext)
13838 +ENTRY(_stext)
13839 +
13840 +/*
13841 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
13842 * %esi points to the real-mode code as a 32-bit pointer.
13843 * CS and DS must be 4 GB flat segments, but we don't depend on
13844 @@ -80,6 +89,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
13845 * can.
13846 */
13847 __HEAD
13848 +
13849 +#ifdef CONFIG_PAX_KERNEXEC
13850 + jmp startup_32
13851 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
13852 +.fill PAGE_SIZE-5,1,0xcc
13853 +#endif
13854 +
13855 ENTRY(startup_32)
13856 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
13857 us to not reload segments */
13858 @@ -97,6 +113,57 @@ ENTRY(startup_32)
13859 movl %eax,%gs
13860 2:
13861
13862 +#ifdef CONFIG_SMP
13863 + movl $pa(cpu_gdt_table),%edi
13864 + movl $__per_cpu_load,%eax
13865 + movw %ax,__KERNEL_PERCPU + 2(%edi)
13866 + rorl $16,%eax
13867 + movb %al,__KERNEL_PERCPU + 4(%edi)
13868 + movb %ah,__KERNEL_PERCPU + 7(%edi)
13869 + movl $__per_cpu_end - 1,%eax
13870 + subl $__per_cpu_start,%eax
13871 + movw %ax,__KERNEL_PERCPU + 0(%edi)
13872 +#endif
13873 +
13874 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13875 + movl $NR_CPUS,%ecx
13876 + movl $pa(cpu_gdt_table),%edi
13877 +1:
13878 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
13879 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
13880 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
13881 + addl $PAGE_SIZE_asm,%edi
13882 + loop 1b
13883 +#endif
13884 +
13885 +#ifdef CONFIG_PAX_KERNEXEC
13886 + movl $pa(boot_gdt),%edi
13887 + movl $__LOAD_PHYSICAL_ADDR,%eax
13888 + movw %ax,__BOOT_CS + 2(%edi)
13889 + rorl $16,%eax
13890 + movb %al,__BOOT_CS + 4(%edi)
13891 + movb %ah,__BOOT_CS + 7(%edi)
13892 + rorl $16,%eax
13893 +
13894 + ljmp $(__BOOT_CS),$1f
13895 +1:
13896 +
13897 + movl $NR_CPUS,%ecx
13898 + movl $pa(cpu_gdt_table),%edi
13899 + addl $__PAGE_OFFSET,%eax
13900 +1:
13901 + movw %ax,__KERNEL_CS + 2(%edi)
13902 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
13903 + rorl $16,%eax
13904 + movb %al,__KERNEL_CS + 4(%edi)
13905 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
13906 + movb %ah,__KERNEL_CS + 7(%edi)
13907 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
13908 + rorl $16,%eax
13909 + addl $PAGE_SIZE_asm,%edi
13910 + loop 1b
13911 +#endif
13912 +
13913 /*
13914 * Clear BSS first so that there are no surprises...
13915 */
13916 @@ -140,9 +207,7 @@ ENTRY(startup_32)
13917 cmpl $num_subarch_entries, %eax
13918 jae bad_subarch
13919
13920 - movl pa(subarch_entries)(,%eax,4), %eax
13921 - subl $__PAGE_OFFSET, %eax
13922 - jmp *%eax
13923 + jmp *pa(subarch_entries)(,%eax,4)
13924
13925 bad_subarch:
13926 WEAK(lguest_entry)
13927 @@ -154,10 +219,10 @@ WEAK(xen_entry)
13928 __INITDATA
13929
13930 subarch_entries:
13931 - .long default_entry /* normal x86/PC */
13932 - .long lguest_entry /* lguest hypervisor */
13933 - .long xen_entry /* Xen hypervisor */
13934 - .long default_entry /* Moorestown MID */
13935 + .long ta(default_entry) /* normal x86/PC */
13936 + .long ta(lguest_entry) /* lguest hypervisor */
13937 + .long ta(xen_entry) /* Xen hypervisor */
13938 + .long ta(default_entry) /* Moorestown MID */
13939 num_subarch_entries = (. - subarch_entries) / 4
13940 .previous
13941 #endif /* CONFIG_PARAVIRT */
13942 @@ -218,8 +283,11 @@ default_entry:
13943 movl %eax, pa(max_pfn_mapped)
13944
13945 /* Do early initialization of the fixmap area */
13946 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
13947 - movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13948 +#ifdef CONFIG_COMPAT_VDSO
13949 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13950 +#else
13951 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
13952 +#endif
13953 #else /* Not PAE */
13954
13955 page_pde_offset = (__PAGE_OFFSET >> 20);
13956 @@ -249,8 +317,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
13957 movl %eax, pa(max_pfn_mapped)
13958
13959 /* Do early initialization of the fixmap area */
13960 - movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
13961 - movl %eax,pa(swapper_pg_dir+0xffc)
13962 +#ifdef CONFIG_COMPAT_VDSO
13963 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
13964 +#else
13965 + movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
13966 +#endif
13967 #endif
13968 jmp 3f
13969 /*
13970 @@ -297,6 +368,7 @@ ENTRY(startup_32_smp)
13971 orl %edx,%eax
13972 movl %eax,%cr4
13973
13974 +#ifdef CONFIG_X86_PAE
13975 btl $5, %eax # check if PAE is enabled
13976 jnc 6f
13977
13978 @@ -312,13 +384,17 @@ ENTRY(startup_32_smp)
13979 jnc 6f
13980
13981 /* Setup EFER (Extended Feature Enable Register) */
13982 - movl $0xc0000080, %ecx
13983 + movl $MSR_EFER, %ecx
13984 rdmsr
13985
13986 btsl $11, %eax
13987 /* Make changes effective */
13988 wrmsr
13989
13990 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
13991 + movl $1,pa(nx_enabled)
13992 +#endif
13993 +
13994 6:
13995
13996 /*
13997 @@ -344,9 +420,7 @@ ENTRY(startup_32_smp)
13998
13999 #ifdef CONFIG_SMP
14000 cmpb $0, ready
14001 - jz 1f /* Initial CPU cleans BSS */
14002 - jmp checkCPUtype
14003 -1:
14004 + jnz checkCPUtype /* Initial CPU cleans BSS */
14005 #endif /* CONFIG_SMP */
14006
14007 /*
14008 @@ -424,7 +498,7 @@ is386: movl $2,%ecx # set MP
14009 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14010 movl %eax,%ss # after changing gdt.
14011
14012 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
14013 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14014 movl %eax,%ds
14015 movl %eax,%es
14016
14017 @@ -438,15 +512,22 @@ is386: movl $2,%ecx # set MP
14018 */
14019 cmpb $0,ready
14020 jne 1f
14021 - movl $per_cpu__gdt_page,%eax
14022 + movl $cpu_gdt_table,%eax
14023 movl $per_cpu__stack_canary,%ecx
14024 +#ifdef CONFIG_SMP
14025 + addl $__per_cpu_load,%ecx
14026 +#endif
14027 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14028 shrl $16, %ecx
14029 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14030 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14031 1:
14032 -#endif
14033 movl $(__KERNEL_STACK_CANARY),%eax
14034 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14035 + movl $(__USER_DS),%eax
14036 +#else
14037 + xorl %eax,%eax
14038 +#endif
14039 movl %eax,%gs
14040
14041 xorl %eax,%eax # Clear LDT
14042 @@ -457,10 +538,6 @@ is386: movl $2,%ecx # set MP
14043 #ifdef CONFIG_SMP
14044 movb ready, %cl
14045 movb $1, ready
14046 - cmpb $0,%cl # the first CPU calls start_kernel
14047 - je 1f
14048 - movl (stack_start), %esp
14049 -1:
14050 #endif /* CONFIG_SMP */
14051 jmp *(initial_code)
14052
14053 @@ -546,22 +623,22 @@ early_page_fault:
14054 jmp early_fault
14055
14056 early_fault:
14057 - cld
14058 #ifdef CONFIG_PRINTK
14059 + cmpl $1,%ss:early_recursion_flag
14060 + je hlt_loop
14061 + incl %ss:early_recursion_flag
14062 + cld
14063 pusha
14064 movl $(__KERNEL_DS),%eax
14065 movl %eax,%ds
14066 movl %eax,%es
14067 - cmpl $2,early_recursion_flag
14068 - je hlt_loop
14069 - incl early_recursion_flag
14070 movl %cr2,%eax
14071 pushl %eax
14072 pushl %edx /* trapno */
14073 pushl $fault_msg
14074 call printk
14075 +; call dump_stack
14076 #endif
14077 - call dump_stack
14078 hlt_loop:
14079 hlt
14080 jmp hlt_loop
14081 @@ -569,8 +646,11 @@ hlt_loop:
14082 /* This is the default interrupt "handler" :-) */
14083 ALIGN
14084 ignore_int:
14085 - cld
14086 #ifdef CONFIG_PRINTK
14087 + cmpl $2,%ss:early_recursion_flag
14088 + je hlt_loop
14089 + incl %ss:early_recursion_flag
14090 + cld
14091 pushl %eax
14092 pushl %ecx
14093 pushl %edx
14094 @@ -579,9 +659,6 @@ ignore_int:
14095 movl $(__KERNEL_DS),%eax
14096 movl %eax,%ds
14097 movl %eax,%es
14098 - cmpl $2,early_recursion_flag
14099 - je hlt_loop
14100 - incl early_recursion_flag
14101 pushl 16(%esp)
14102 pushl 24(%esp)
14103 pushl 32(%esp)
14104 @@ -610,31 +687,47 @@ ENTRY(initial_page_table)
14105 /*
14106 * BSS section
14107 */
14108 -__PAGE_ALIGNED_BSS
14109 - .align PAGE_SIZE_asm
14110 #ifdef CONFIG_X86_PAE
14111 +.section .swapper_pg_pmd,"a",@progbits
14112 swapper_pg_pmd:
14113 .fill 1024*KPMDS,4,0
14114 #else
14115 +.section .swapper_pg_dir,"a",@progbits
14116 ENTRY(swapper_pg_dir)
14117 .fill 1024,4,0
14118 #endif
14119 +.section .swapper_pg_fixmap,"a",@progbits
14120 swapper_pg_fixmap:
14121 .fill 1024,4,0
14122 #ifdef CONFIG_X86_TRAMPOLINE
14123 +.section .trampoline_pg_dir,"a",@progbits
14124 ENTRY(trampoline_pg_dir)
14125 +#ifdef CONFIG_X86_PAE
14126 + .fill 4,8,0
14127 +#else
14128 .fill 1024,4,0
14129 #endif
14130 +#endif
14131 +
14132 +.section .empty_zero_page,"a",@progbits
14133 ENTRY(empty_zero_page)
14134 .fill 4096,1,0
14135
14136 /*
14137 + * The IDT has to be page-aligned to simplify the Pentium
14138 + * F0 0F bug workaround.. We have a special link segment
14139 + * for this.
14140 + */
14141 +.section .idt,"a",@progbits
14142 +ENTRY(idt_table)
14143 + .fill 256,8,0
14144 +
14145 +/*
14146 * This starts the data section.
14147 */
14148 #ifdef CONFIG_X86_PAE
14149 -__PAGE_ALIGNED_DATA
14150 - /* Page-aligned for the benefit of paravirt? */
14151 - .align PAGE_SIZE_asm
14152 +.section .swapper_pg_dir,"a",@progbits
14153 +
14154 ENTRY(swapper_pg_dir)
14155 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14156 # if KPMDS == 3
14157 @@ -653,15 +746,24 @@ ENTRY(swapper_pg_dir)
14158 # error "Kernel PMDs should be 1, 2 or 3"
14159 # endif
14160 .align PAGE_SIZE_asm /* needs to be page-sized too */
14161 +
14162 +#ifdef CONFIG_PAX_PER_CPU_PGD
14163 +ENTRY(cpu_pgd)
14164 + .rept NR_CPUS
14165 + .fill 4,8,0
14166 + .endr
14167 +#endif
14168 +
14169 #endif
14170
14171 .data
14172 ENTRY(stack_start)
14173 - .long init_thread_union+THREAD_SIZE
14174 + .long init_thread_union+THREAD_SIZE-8
14175 .long __BOOT_DS
14176
14177 ready: .byte 0
14178
14179 +.section .rodata,"a",@progbits
14180 early_recursion_flag:
14181 .long 0
14182
14183 @@ -697,7 +799,7 @@ fault_msg:
14184 .word 0 # 32 bit align gdt_desc.address
14185 boot_gdt_descr:
14186 .word __BOOT_DS+7
14187 - .long boot_gdt - __PAGE_OFFSET
14188 + .long pa(boot_gdt)
14189
14190 .word 0 # 32-bit align idt_desc.address
14191 idt_descr:
14192 @@ -708,7 +810,7 @@ idt_descr:
14193 .word 0 # 32 bit align gdt_desc.address
14194 ENTRY(early_gdt_descr)
14195 .word GDT_ENTRIES*8-1
14196 - .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14197 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
14198
14199 /*
14200 * The boot_gdt must mirror the equivalent in setup.S and is
14201 @@ -717,5 +819,65 @@ ENTRY(early_gdt_descr)
14202 .align L1_CACHE_BYTES
14203 ENTRY(boot_gdt)
14204 .fill GDT_ENTRY_BOOT_CS,8,0
14205 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14206 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14207 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14208 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14209 +
14210 + .align PAGE_SIZE_asm
14211 +ENTRY(cpu_gdt_table)
14212 + .rept NR_CPUS
14213 + .quad 0x0000000000000000 /* NULL descriptor */
14214 + .quad 0x0000000000000000 /* 0x0b reserved */
14215 + .quad 0x0000000000000000 /* 0x13 reserved */
14216 + .quad 0x0000000000000000 /* 0x1b reserved */
14217 +
14218 +#ifdef CONFIG_PAX_KERNEXEC
14219 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14220 +#else
14221 + .quad 0x0000000000000000 /* 0x20 unused */
14222 +#endif
14223 +
14224 + .quad 0x0000000000000000 /* 0x28 unused */
14225 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14226 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14227 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14228 + .quad 0x0000000000000000 /* 0x4b reserved */
14229 + .quad 0x0000000000000000 /* 0x53 reserved */
14230 + .quad 0x0000000000000000 /* 0x5b reserved */
14231 +
14232 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14233 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14234 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14235 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14236 +
14237 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14238 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14239 +
14240 + /*
14241 + * Segments used for calling PnP BIOS have byte granularity.
14242 + * The code segments and data segments have fixed 64k limits,
14243 + * the transfer segment sizes are set at run time.
14244 + */
14245 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
14246 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
14247 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
14248 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
14249 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
14250 +
14251 + /*
14252 + * The APM segments have byte granularity and their bases
14253 + * are set at run time. All have 64k limits.
14254 + */
14255 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14256 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14257 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
14258 +
14259 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14260 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14261 + .quad 0x0040910000000018 /* 0xe0 - STACK_CANARY */
14262 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14263 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14264 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14265 +
14266 + /* Be sure this is zeroed to avoid false validations in Xen */
14267 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14268 + .endr
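The cpu_gdt_table appended to head_32.S above replaces the per-cpu gdt_page with one statically initialised GDT per CPU, and the boot descriptors get the accessed bit pre-set (0x...9a -> 0x...9b, 0x...92 -> 0x...93) so the CPU never needs to write a descriptor that may end up in read-only memory. Every .quad is a raw x86 segment descriptor; as a reading aid, here is a small standalone decoder (plain user-space C, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Decode a raw 8-byte x86 segment descriptor such as the .quad values
 * in cpu_gdt_table above. */
struct gdt_fields {
	uint32_t base;
	uint32_t limit;   /* in bytes, after applying granularity */
	uint8_t  access;  /* P, DPL, S, type */
	uint8_t  flags;   /* G, D/B, L, AVL */
};

static struct gdt_fields gdt_decode(uint64_t d)
{
	struct gdt_fields f;

	f.base   = (uint32_t)(((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24));
	f.limit  = (uint32_t)((d & 0xffff) | (((d >> 48) & 0xf) << 16));
	f.access = (uint8_t)((d >> 40) & 0xff);
	f.flags  = (uint8_t)((d >> 52) & 0xf);
	if (f.flags & 0x8)        /* G bit: limit counted in 4 KiB pages */
		f.limit = (f.limit << 12) | 0xfff;
	return f;
}

int main(void)
{
	/* 0x00cf9b000000ffff: base 0, 4 GiB limit, present, DPL 0,
	 * execute/read code, accessed, 32-bit, page granular. */
	struct gdt_fields f = gdt_decode(0x00cf9b000000ffffULL);

	printf("base=%#x limit=%#x access=%#x flags=%#x\n",
	       f.base, f.limit, f.access, f.flags);
	return 0;
}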
14269 diff -urNp linux-2.6.32.42/arch/x86/kernel/head_64.S linux-2.6.32.42/arch/x86/kernel/head_64.S
14270 --- linux-2.6.32.42/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14271 +++ linux-2.6.32.42/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14272 @@ -19,6 +19,7 @@
14273 #include <asm/cache.h>
14274 #include <asm/processor-flags.h>
14275 #include <asm/percpu.h>
14276 +#include <asm/cpufeature.h>
14277
14278 #ifdef CONFIG_PARAVIRT
14279 #include <asm/asm-offsets.h>
14280 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14281 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14282 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14283 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14284 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
14285 +L3_VMALLOC_START = pud_index(VMALLOC_START)
14286 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14287 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14288
14289 .text
14290 __HEAD
14291 @@ -85,35 +90,22 @@ startup_64:
14292 */
14293 addq %rbp, init_level4_pgt + 0(%rip)
14294 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14295 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14296 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14297 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14298
14299 addq %rbp, level3_ident_pgt + 0(%rip)
14300 +#ifndef CONFIG_XEN
14301 + addq %rbp, level3_ident_pgt + 8(%rip)
14302 +#endif
14303
14304 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14305 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14306 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14307
14308 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14309 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14310 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14311
14312 - /* Add an Identity mapping if I am above 1G */
14313 - leaq _text(%rip), %rdi
14314 - andq $PMD_PAGE_MASK, %rdi
14315 -
14316 - movq %rdi, %rax
14317 - shrq $PUD_SHIFT, %rax
14318 - andq $(PTRS_PER_PUD - 1), %rax
14319 - jz ident_complete
14320 -
14321 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14322 - leaq level3_ident_pgt(%rip), %rbx
14323 - movq %rdx, 0(%rbx, %rax, 8)
14324 -
14325 - movq %rdi, %rax
14326 - shrq $PMD_SHIFT, %rax
14327 - andq $(PTRS_PER_PMD - 1), %rax
14328 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14329 - leaq level2_spare_pgt(%rip), %rbx
14330 - movq %rdx, 0(%rbx, %rax, 8)
14331 -ident_complete:
14332 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14333 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14334
14335 /*
14336 * Fixup the kernel text+data virtual addresses. Note that
14337 @@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14338 * after the boot processor executes this code.
14339 */
14340
14341 - /* Enable PAE mode and PGE */
14342 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14343 + /* Enable PAE mode and PSE/PGE */
14344 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14345 movq %rax, %cr4
14346
14347 /* Setup early boot stage 4 level pagetables. */
14348 @@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14349 movl $MSR_EFER, %ecx
14350 rdmsr
14351 btsl $_EFER_SCE, %eax /* Enable System Call */
14352 - btl $20,%edi /* No Execute supported? */
14353 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14354 jnc 1f
14355 btsl $_EFER_NX, %eax
14356 + leaq init_level4_pgt(%rip), %rdi
14357 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14358 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14359 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14360 1: wrmsr /* Make changes effective */
14361
14362 /* Setup cr0 */
14363 @@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14364 .quad x86_64_start_kernel
14365 ENTRY(initial_gs)
14366 .quad INIT_PER_CPU_VAR(irq_stack_union)
14367 - __FINITDATA
14368
14369 ENTRY(stack_start)
14370 .quad init_thread_union+THREAD_SIZE-8
14371 .word 0
14372 + __FINITDATA
14373
14374 bad_address:
14375 jmp bad_address
14376
14377 - .section ".init.text","ax"
14378 + __INIT
14379 #ifdef CONFIG_EARLY_PRINTK
14380 .globl early_idt_handlers
14381 early_idt_handlers:
14382 @@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
14383 #endif /* EARLY_PRINTK */
14384 1: hlt
14385 jmp 1b
14386 + .previous
14387
14388 #ifdef CONFIG_EARLY_PRINTK
14389 + __INITDATA
14390 early_recursion_flag:
14391 .long 0
14392 + .previous
14393
14394 + .section .rodata,"a",@progbits
14395 early_idt_msg:
14396 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14397 early_idt_ripmsg:
14398 .asciz "RIP %s\n"
14399 -#endif /* CONFIG_EARLY_PRINTK */
14400 .previous
14401 +#endif /* CONFIG_EARLY_PRINTK */
14402
14403 + .section .rodata,"a",@progbits
14404 #define NEXT_PAGE(name) \
14405 .balign PAGE_SIZE; \
14406 ENTRY(name)
14407 @@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
14408 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14409 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14410 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14411 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
14412 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14413 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14414 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14415 .org init_level4_pgt + L4_START_KERNEL*8, 0
14416 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14417 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14418
14419 +#ifdef CONFIG_PAX_PER_CPU_PGD
14420 +NEXT_PAGE(cpu_pgd)
14421 + .rept NR_CPUS
14422 + .fill 512,8,0
14423 + .endr
14424 +#endif
14425 +
14426 NEXT_PAGE(level3_ident_pgt)
14427 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14428 +#ifdef CONFIG_XEN
14429 .fill 511,8,0
14430 +#else
14431 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14432 + .fill 510,8,0
14433 +#endif
14434 +
14435 +NEXT_PAGE(level3_vmalloc_pgt)
14436 + .fill 512,8,0
14437 +
14438 +NEXT_PAGE(level3_vmemmap_pgt)
14439 + .fill L3_VMEMMAP_START,8,0
14440 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14441
14442 NEXT_PAGE(level3_kernel_pgt)
14443 .fill L3_START_KERNEL,8,0
14444 @@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
14445 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14446 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14447
14448 +NEXT_PAGE(level2_vmemmap_pgt)
14449 + .fill 512,8,0
14450 +
14451 NEXT_PAGE(level2_fixmap_pgt)
14452 - .fill 506,8,0
14453 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14454 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14455 - .fill 5,8,0
14456 + .fill 507,8,0
14457 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14458 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14459 + .fill 4,8,0
14460
14461 -NEXT_PAGE(level1_fixmap_pgt)
14462 +NEXT_PAGE(level1_vsyscall_pgt)
14463 .fill 512,8,0
14464
14465 -NEXT_PAGE(level2_ident_pgt)
14466 - /* Since I easily can, map the first 1G.
14467 + /* Since I easily can, map the first 2G.
14468 * Don't set NX because code runs from these pages.
14469 */
14470 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14471 +NEXT_PAGE(level2_ident_pgt)
14472 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14473
14474 NEXT_PAGE(level2_kernel_pgt)
14475 /*
14476 @@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
14477 * If you want to increase this then increase MODULES_VADDR
14478 * too.)
14479 */
14480 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14481 - KERNEL_IMAGE_SIZE/PMD_SIZE)
14482 -
14483 -NEXT_PAGE(level2_spare_pgt)
14484 - .fill 512, 8, 0
14485 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14486
14487 #undef PMDS
14488 #undef NEXT_PAGE
14489
14490 - .data
14491 + .align PAGE_SIZE
14492 +ENTRY(cpu_gdt_table)
14493 + .rept NR_CPUS
14494 + .quad 0x0000000000000000 /* NULL descriptor */
14495 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14496 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
14497 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
14498 + .quad 0x00cffb000000ffff /* __USER32_CS */
14499 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14500 + .quad 0x00affb000000ffff /* __USER_CS */
14501 +
14502 +#ifdef CONFIG_PAX_KERNEXEC
14503 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14504 +#else
14505 + .quad 0x0 /* unused */
14506 +#endif
14507 +
14508 + .quad 0,0 /* TSS */
14509 + .quad 0,0 /* LDT */
14510 + .quad 0,0,0 /* three TLS descriptors */
14511 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
14512 + /* asm/segment.h:GDT_ENTRIES must match this */
14513 +
14514 + /* zero the remaining page */
14515 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14516 + .endr
14517 +
14518 .align 16
14519 .globl early_gdt_descr
14520 early_gdt_descr:
14521 .word GDT_ENTRIES*8-1
14522 early_gdt_descr_base:
14523 - .quad INIT_PER_CPU_VAR(gdt_page)
14524 + .quad cpu_gdt_table
14525
14526 ENTRY(phys_base)
14527 /* This must match the first entry in level2_kernel_pgt */
14528 .quad 0x0000000000000000
14529
14530 #include "../../x86/xen/xen-head.S"
14531 -
14532 - .section .bss, "aw", @nobits
14533 +
14534 + .section .rodata,"a",@progbits
14535 .align L1_CACHE_BYTES
14536 ENTRY(idt_table)
14537 - .skip IDT_ENTRIES * 16
14538 + .fill 512,8,0
14539
14540 __PAGE_ALIGNED_BSS
14541 .align PAGE_SIZE
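The head_64.S hunks seed init_level4_pgt with entries for VMALLOC_START and VMEMMAP_START at slots computed with pgd_index()/pud_index(), and set _PAGE_BIT_NX on the non-text top-level entries once the CPU advertises NX. The index arithmetic is just a shift and mask; a standalone sketch with 2.6.32-era layout constants (the addresses are illustrative, not the kernel's own macro definitions):

#include <stdio.h>

#define PUD_SHIFT     30
#define PGDIR_SHIFT   39
#define PTRS_PER_PGD  512
#define PTRS_PER_PUD  512

/* Same arithmetic as the kernel's pgd_index()/pud_index() macros. */
static unsigned long pgd_index(unsigned long addr)
{
	return (addr >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

static unsigned long pud_index(unsigned long addr)
{
	return (addr >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

int main(void)
{
	unsigned long page_offset  = 0xffff880000000000UL;   /* __PAGE_OFFSET */
	unsigned long start_kernel = 0xffffffff80000000UL;   /* __START_KERNEL_map */

	printf("L4_PAGE_OFFSET  = %lu\n", pgd_index(page_offset));    /* 272 */
	printf("L4_START_KERNEL = %lu\n", pgd_index(start_kernel));   /* 511 */
	printf("L3_START_KERNEL = %lu\n", pud_index(start_kernel));   /* 510 */
	return 0;
}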
14542 diff -urNp linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c
14543 --- linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
14544 +++ linux-2.6.32.42/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
14545 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
14546 EXPORT_SYMBOL(cmpxchg8b_emu);
14547 #endif
14548
14549 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
14550 +
14551 /* Networking helper routines. */
14552 EXPORT_SYMBOL(csum_partial_copy_generic);
14553 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
14554 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
14555
14556 EXPORT_SYMBOL(__get_user_1);
14557 EXPORT_SYMBOL(__get_user_2);
14558 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
14559
14560 EXPORT_SYMBOL(csum_partial);
14561 EXPORT_SYMBOL(empty_zero_page);
14562 +
14563 +#ifdef CONFIG_PAX_KERNEXEC
14564 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
14565 +#endif
14566 diff -urNp linux-2.6.32.42/arch/x86/kernel/i8259.c linux-2.6.32.42/arch/x86/kernel/i8259.c
14567 --- linux-2.6.32.42/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
14568 +++ linux-2.6.32.42/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
14569 @@ -208,7 +208,7 @@ spurious_8259A_irq:
14570 "spurious 8259A interrupt: IRQ%d.\n", irq);
14571 spurious_irq_mask |= irqmask;
14572 }
14573 - atomic_inc(&irq_err_count);
14574 + atomic_inc_unchecked(&irq_err_count);
14575 /*
14576 * Theoretically we do not have to handle this IRQ,
14577 * but in Linux this does not cause problems and is
14578 diff -urNp linux-2.6.32.42/arch/x86/kernel/init_task.c linux-2.6.32.42/arch/x86/kernel/init_task.c
14579 --- linux-2.6.32.42/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
14580 +++ linux-2.6.32.42/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
14581 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
14582 * way process stacks are handled. This is done by having a special
14583 * "init_task" linker map entry..
14584 */
14585 -union thread_union init_thread_union __init_task_data =
14586 - { INIT_THREAD_INFO(init_task) };
14587 +union thread_union init_thread_union __init_task_data;
14588
14589 /*
14590 * Initial task structure.
14591 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
14592 * section. Since TSS's are completely CPU-local, we want them
14593 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
14594 */
14595 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
14596 -
14597 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
14598 +EXPORT_SYMBOL(init_tss);
14599 diff -urNp linux-2.6.32.42/arch/x86/kernel/ioport.c linux-2.6.32.42/arch/x86/kernel/ioport.c
14600 --- linux-2.6.32.42/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
14601 +++ linux-2.6.32.42/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
14602 @@ -6,6 +6,7 @@
14603 #include <linux/sched.h>
14604 #include <linux/kernel.h>
14605 #include <linux/capability.h>
14606 +#include <linux/security.h>
14607 #include <linux/errno.h>
14608 #include <linux/types.h>
14609 #include <linux/ioport.h>
14610 @@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
14611
14612 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
14613 return -EINVAL;
14614 +#ifdef CONFIG_GRKERNSEC_IO
14615 + if (turn_on && grsec_disable_privio) {
14616 + gr_handle_ioperm();
14617 + return -EPERM;
14618 + }
14619 +#endif
14620 if (turn_on && !capable(CAP_SYS_RAWIO))
14621 return -EPERM;
14622
14623 @@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
14624 * because the ->io_bitmap_max value must match the bitmap
14625 * contents:
14626 */
14627 - tss = &per_cpu(init_tss, get_cpu());
14628 + tss = init_tss + get_cpu();
14629
14630 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
14631
14632 @@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
14633 return -EINVAL;
14634 /* Trying to gain more privileges? */
14635 if (level > old) {
14636 +#ifdef CONFIG_GRKERNSEC_IO
14637 + if (grsec_disable_privio) {
14638 + gr_handle_iopl();
14639 + return -EPERM;
14640 + }
14641 +#endif
14642 if (!capable(CAP_SYS_RAWIO))
14643 return -EPERM;
14644 }
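The ioport.c hunks gate sys_ioperm()/sys_iopl() behind grsec_disable_privio and index the now-plain init_tss array directly. The bitmap that sys_ioperm() ultimately edits follows the x86 TSS convention (a clear bit means the port is accessible); a standalone sketch of that update, mirroring the semantics of the set_bitmap() helper (illustrative only):

#include <stdio.h>

/* Ioperm-style bitmap update: turn a range of ports on (bit cleared =
 * accessible) or off (bit set = denied). */
static void io_bitmap_update(unsigned long *bitmap, unsigned int from,
			     unsigned int num, int turn_on)
{
	unsigned int port;

	for (port = from; port < from + num; port++) {
		unsigned int idx = port / (8 * sizeof(unsigned long));
		unsigned long bit = 1UL << (port % (8 * sizeof(unsigned long)));

		if (turn_on)
			bitmap[idx] &= ~bit;   /* allow the port */
		else
			bitmap[idx] |= bit;    /* deny the port */
	}
}

int main(void)
{
	unsigned long bitmap[4] = { ~0UL, ~0UL, ~0UL, ~0UL };  /* all denied */

	io_bitmap_update(bitmap, 8, 4, 1);     /* allow ports 8..11 */
	printf("word0 = %#lx\n", bitmap[0]);   /* bits 8..11 now clear */
	return 0;
}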
14645 diff -urNp linux-2.6.32.42/arch/x86/kernel/irq_32.c linux-2.6.32.42/arch/x86/kernel/irq_32.c
14646 --- linux-2.6.32.42/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
14647 +++ linux-2.6.32.42/arch/x86/kernel/irq_32.c 2011-04-23 13:26:46.000000000 -0400
14648 @@ -35,7 +35,7 @@ static int check_stack_overflow(void)
14649 __asm__ __volatile__("andl %%esp,%0" :
14650 "=r" (sp) : "0" (THREAD_SIZE - 1));
14651
14652 - return sp < (sizeof(struct thread_info) + STACK_WARN);
14653 + return sp < STACK_WARN;
14654 }
14655
14656 static void print_stack_overflow(void)
14657 @@ -54,9 +54,9 @@ static inline void print_stack_overflow(
14658 * per-CPU IRQ handling contexts (thread information and stack)
14659 */
14660 union irq_ctx {
14661 - struct thread_info tinfo;
14662 - u32 stack[THREAD_SIZE/sizeof(u32)];
14663 -} __attribute__((aligned(PAGE_SIZE)));
14664 + unsigned long previous_esp;
14665 + u32 stack[THREAD_SIZE/sizeof(u32)];
14666 +} __attribute__((aligned(THREAD_SIZE)));
14667
14668 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
14669 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
14670 @@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
14671 static inline int
14672 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
14673 {
14674 - union irq_ctx *curctx, *irqctx;
14675 + union irq_ctx *irqctx;
14676 u32 *isp, arg1, arg2;
14677
14678 - curctx = (union irq_ctx *) current_thread_info();
14679 irqctx = __get_cpu_var(hardirq_ctx);
14680
14681 /*
14682 @@ -90,21 +89,17 @@ execute_on_irq_stack(int overflow, struc
14683 * handler) we can't do that and just have to keep using the
14684 * current stack (which is the irq stack already after all)
14685 */
14686 - if (unlikely(curctx == irqctx))
14687 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14688 return 0;
14689
14690 /* build the stack frame on the IRQ stack */
14691 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14692 - irqctx->tinfo.task = curctx->tinfo.task;
14693 - irqctx->tinfo.previous_esp = current_stack_pointer;
14694 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14695 + irqctx->previous_esp = current_stack_pointer;
14696 + add_preempt_count(HARDIRQ_OFFSET);
14697
14698 - /*
14699 - * Copy the softirq bits in preempt_count so that the
14700 - * softirq checks work in the hardirq context.
14701 - */
14702 - irqctx->tinfo.preempt_count =
14703 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
14704 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
14705 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14706 + __set_fs(MAKE_MM_SEG(0));
14707 +#endif
14708
14709 if (unlikely(overflow))
14710 call_on_stack(print_stack_overflow, isp);
14711 @@ -116,6 +111,12 @@ execute_on_irq_stack(int overflow, struc
14712 : "0" (irq), "1" (desc), "2" (isp),
14713 "D" (desc->handle_irq)
14714 : "memory", "cc", "ecx");
14715 +
14716 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14717 + __set_fs(current_thread_info()->addr_limit);
14718 +#endif
14719 +
14720 + sub_preempt_count(HARDIRQ_OFFSET);
14721 return 1;
14722 }
14723
14724 @@ -124,28 +125,11 @@ execute_on_irq_stack(int overflow, struc
14725 */
14726 void __cpuinit irq_ctx_init(int cpu)
14727 {
14728 - union irq_ctx *irqctx;
14729 -
14730 if (per_cpu(hardirq_ctx, cpu))
14731 return;
14732
14733 - irqctx = &per_cpu(hardirq_stack, cpu);
14734 - irqctx->tinfo.task = NULL;
14735 - irqctx->tinfo.exec_domain = NULL;
14736 - irqctx->tinfo.cpu = cpu;
14737 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
14738 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14739 -
14740 - per_cpu(hardirq_ctx, cpu) = irqctx;
14741 -
14742 - irqctx = &per_cpu(softirq_stack, cpu);
14743 - irqctx->tinfo.task = NULL;
14744 - irqctx->tinfo.exec_domain = NULL;
14745 - irqctx->tinfo.cpu = cpu;
14746 - irqctx->tinfo.preempt_count = 0;
14747 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
14748 -
14749 - per_cpu(softirq_ctx, cpu) = irqctx;
14750 + per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
14751 + per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
14752
14753 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
14754 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
14755 @@ -159,7 +143,6 @@ void irq_ctx_exit(int cpu)
14756 asmlinkage void do_softirq(void)
14757 {
14758 unsigned long flags;
14759 - struct thread_info *curctx;
14760 union irq_ctx *irqctx;
14761 u32 *isp;
14762
14763 @@ -169,15 +152,22 @@ asmlinkage void do_softirq(void)
14764 local_irq_save(flags);
14765
14766 if (local_softirq_pending()) {
14767 - curctx = current_thread_info();
14768 irqctx = __get_cpu_var(softirq_ctx);
14769 - irqctx->tinfo.task = curctx->task;
14770 - irqctx->tinfo.previous_esp = current_stack_pointer;
14771 + irqctx->previous_esp = current_stack_pointer;
14772
14773 /* build the stack frame on the softirq stack */
14774 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14775 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14776 +
14777 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14778 + __set_fs(MAKE_MM_SEG(0));
14779 +#endif
14780
14781 call_on_stack(__do_softirq, isp);
14782 +
14783 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14784 + __set_fs(current_thread_info()->addr_limit);
14785 +#endif
14786 +
14787 /*
14788 * Shouldnt happen, we returned above if in_interrupt():
14789 */
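The irq_32.c rework drops the fake thread_info that used to sit at the bottom of each IRQ stack: union irq_ctx is now just a stack plus a saved previous_esp, and "am I already on the IRQ stack?" becomes a pointer-distance check instead of a thread_info comparison. A minimal sketch of that test:

#include <stdint.h>
#include <stdbool.h>

#define THREAD_SIZE (8 * 1024UL)   /* stand-in for the kernel constant */

/* True when sp already lies inside [irq_stack_base, irq_stack_base +
 * THREAD_SIZE); relies on unsigned wrap-around for the sp < base case,
 * just like the (void *)current_stack_pointer - (void *)irqctx check above. */
static bool on_irq_stack(uintptr_t sp, uintptr_t irq_stack_base)
{
	return sp - irq_stack_base < THREAD_SIZE;
}

int main(void)
{
	uintptr_t base = 0xc1580000UL;

	return on_irq_stack(base + 0x100, base) ? 0 : 1;   /* inside -> 0 */
}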
14790 diff -urNp linux-2.6.32.42/arch/x86/kernel/irq.c linux-2.6.32.42/arch/x86/kernel/irq.c
14791 --- linux-2.6.32.42/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
14792 +++ linux-2.6.32.42/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
14793 @@ -15,7 +15,7 @@
14794 #include <asm/mce.h>
14795 #include <asm/hw_irq.h>
14796
14797 -atomic_t irq_err_count;
14798 +atomic_unchecked_t irq_err_count;
14799
14800 /* Function pointer for generic interrupt vector handling */
14801 void (*generic_interrupt_extension)(void) = NULL;
14802 @@ -114,9 +114,9 @@ static int show_other_interrupts(struct
14803 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
14804 seq_printf(p, " Machine check polls\n");
14805 #endif
14806 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
14807 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
14808 #if defined(CONFIG_X86_IO_APIC)
14809 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
14810 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
14811 #endif
14812 return 0;
14813 }
14814 @@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
14815
14816 u64 arch_irq_stat(void)
14817 {
14818 - u64 sum = atomic_read(&irq_err_count);
14819 + u64 sum = atomic_read_unchecked(&irq_err_count);
14820
14821 #ifdef CONFIG_X86_IO_APIC
14822 - sum += atomic_read(&irq_mis_count);
14823 + sum += atomic_read_unchecked(&irq_mis_count);
14824 #endif
14825 return sum;
14826 }
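irq_err_count (and the irq_mis_count reads) switch to atomic_unchecked_t, the PaX companion type for statistics counters that may legitimately wrap and therefore must not trip refcount overflow detection. The real type and accessors are defined elsewhere in this patch; a reduced, illustrative sketch of the idea on x86:

/* With PAX_REFCOUNT, plain atomic_t increments trap on signed overflow;
 * _unchecked counters skip that check. Not the patch's exact definitions. */
typedef struct {
	int counter;
} atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return v->counter;
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	asm volatile("lock; incl %0" : "+m" (v->counter));
}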
14827 diff -urNp linux-2.6.32.42/arch/x86/kernel/kgdb.c linux-2.6.32.42/arch/x86/kernel/kgdb.c
14828 --- linux-2.6.32.42/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
14829 +++ linux-2.6.32.42/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
14830 @@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
14831
14832 /* clear the trace bit */
14833 linux_regs->flags &= ~X86_EFLAGS_TF;
14834 - atomic_set(&kgdb_cpu_doing_single_step, -1);
14835 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
14836
14837 /* set the trace bit if we're stepping */
14838 if (remcomInBuffer[0] == 's') {
14839 linux_regs->flags |= X86_EFLAGS_TF;
14840 kgdb_single_step = 1;
14841 - atomic_set(&kgdb_cpu_doing_single_step,
14842 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
14843 raw_smp_processor_id());
14844 }
14845
14846 @@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
14847 break;
14848
14849 case DIE_DEBUG:
14850 - if (atomic_read(&kgdb_cpu_doing_single_step) ==
14851 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
14852 raw_smp_processor_id()) {
14853 if (user_mode(regs))
14854 return single_step_cont(regs, args);
14855 @@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
14856 return instruction_pointer(regs);
14857 }
14858
14859 -struct kgdb_arch arch_kgdb_ops = {
14860 +const struct kgdb_arch arch_kgdb_ops = {
14861 /* Breakpoint instruction: */
14862 .gdb_bpt_instr = { 0xcc },
14863 .flags = KGDB_HW_BREAKPOINT,
14864 diff -urNp linux-2.6.32.42/arch/x86/kernel/kprobes.c linux-2.6.32.42/arch/x86/kernel/kprobes.c
14865 --- linux-2.6.32.42/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
14866 +++ linux-2.6.32.42/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
14867 @@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
14868 char op;
14869 s32 raddr;
14870 } __attribute__((packed)) * jop;
14871 - jop = (struct __arch_jmp_op *)from;
14872 +
14873 + jop = (struct __arch_jmp_op *)(ktla_ktva(from));
14874 +
14875 + pax_open_kernel();
14876 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
14877 jop->op = RELATIVEJUMP_INSTRUCTION;
14878 + pax_close_kernel();
14879 }
14880
14881 /*
14882 @@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
14883 kprobe_opcode_t opcode;
14884 kprobe_opcode_t *orig_opcodes = opcodes;
14885
14886 - if (search_exception_tables((unsigned long)opcodes))
14887 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
14888 return 0; /* Page fault may occur on this address. */
14889
14890 retry:
14891 @@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
14892 disp = (u8 *) p->addr + *((s32 *) insn) -
14893 (u8 *) p->ainsn.insn;
14894 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
14895 + pax_open_kernel();
14896 *(s32 *)insn = (s32) disp;
14897 + pax_close_kernel();
14898 }
14899 }
14900 #endif
14901 @@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
14902
14903 static void __kprobes arch_copy_kprobe(struct kprobe *p)
14904 {
14905 - memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
14906 + pax_open_kernel();
14907 + memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
14908 + pax_close_kernel();
14909
14910 fix_riprel(p);
14911
14912 - if (can_boost(p->addr))
14913 + if (can_boost(ktla_ktva(p->addr)))
14914 p->ainsn.boostable = 0;
14915 else
14916 p->ainsn.boostable = -1;
14917
14918 - p->opcode = *p->addr;
14919 + p->opcode = *(ktla_ktva(p->addr));
14920 }
14921
14922 int __kprobes arch_prepare_kprobe(struct kprobe *p)
14923 @@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
14924 if (p->opcode == BREAKPOINT_INSTRUCTION)
14925 regs->ip = (unsigned long)p->addr;
14926 else
14927 - regs->ip = (unsigned long)p->ainsn.insn;
14928 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14929 }
14930
14931 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
14932 @@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
14933 if (p->ainsn.boostable == 1 && !p->post_handler) {
14934 /* Boost up -- we can execute copied instructions directly */
14935 reset_current_kprobe();
14936 - regs->ip = (unsigned long)p->ainsn.insn;
14937 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
14938 preempt_enable_no_resched();
14939 return;
14940 }
14941 @@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
14942 struct kprobe_ctlblk *kcb;
14943
14944 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
14945 - if (*addr != BREAKPOINT_INSTRUCTION) {
14946 + if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
14947 /*
14948 * The breakpoint instruction was removed right
14949 * after we hit it. Another cpu has removed
14950 @@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
14951 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
14952 {
14953 unsigned long *tos = stack_addr(regs);
14954 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
14955 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
14956 unsigned long orig_ip = (unsigned long)p->addr;
14957 kprobe_opcode_t *insn = p->ainsn.insn;
14958
14959 @@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
14960 struct die_args *args = data;
14961 int ret = NOTIFY_DONE;
14962
14963 - if (args->regs && user_mode_vm(args->regs))
14964 + if (args->regs && user_mode(args->regs))
14965 return ret;
14966
14967 switch (val) {
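The kprobes hunks route every read of kernel text through ktla_ktva()/ktva_ktla() and wrap the actual instruction writes in pax_open_kernel()/pax_close_kernel(). Under KERNEXEC the kernel text is reachable through two mappings that differ by a constant; a sketch of the translation this code assumes (the concrete macro definitions and __KERNEL_TEXT_OFFSET come from other parts of the patch, so treat the exact form as an assumption):

/* Assumed shape of the text-address translation used above: the two
 * mappings of the kernel text differ by a constant offset, and with
 * KERNEXEC disabled the offset is 0 so both macros reduce to the identity. */
extern const unsigned long __KERNEL_TEXT_OFFSET;   /* stand-in declaration */

#define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET)
#define ktva_ktla(addr) ((addr) - __KERNEL_TEXT_OFFSET)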
14968 diff -urNp linux-2.6.32.42/arch/x86/kernel/ldt.c linux-2.6.32.42/arch/x86/kernel/ldt.c
14969 --- linux-2.6.32.42/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
14970 +++ linux-2.6.32.42/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
14971 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
14972 if (reload) {
14973 #ifdef CONFIG_SMP
14974 preempt_disable();
14975 - load_LDT(pc);
14976 + load_LDT_nolock(pc);
14977 if (!cpumask_equal(mm_cpumask(current->mm),
14978 cpumask_of(smp_processor_id())))
14979 smp_call_function(flush_ldt, current->mm, 1);
14980 preempt_enable();
14981 #else
14982 - load_LDT(pc);
14983 + load_LDT_nolock(pc);
14984 #endif
14985 }
14986 if (oldsize) {
14987 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
14988 return err;
14989
14990 for (i = 0; i < old->size; i++)
14991 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
14992 + write_ldt_entry(new->ldt, i, old->ldt + i);
14993 return 0;
14994 }
14995
14996 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct
14997 retval = copy_ldt(&mm->context, &old_mm->context);
14998 mutex_unlock(&old_mm->context.lock);
14999 }
15000 +
15001 + if (tsk == current) {
15002 + mm->context.vdso = 0;
15003 +
15004 +#ifdef CONFIG_X86_32
15005 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15006 + mm->context.user_cs_base = 0UL;
15007 + mm->context.user_cs_limit = ~0UL;
15008 +
15009 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15010 + cpus_clear(mm->context.cpu_user_cs_mask);
15011 +#endif
15012 +
15013 +#endif
15014 +#endif
15015 +
15016 + }
15017 +
15018 return retval;
15019 }
15020
15021 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15022 }
15023 }
15024
15025 +#ifdef CONFIG_PAX_SEGMEXEC
15026 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15027 + error = -EINVAL;
15028 + goto out_unlock;
15029 + }
15030 +#endif
15031 +
15032 fill_ldt(&ldt, &ldt_info);
15033 if (oldmode)
15034 ldt.avl = 0;
15035 diff -urNp linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c
15036 --- linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15037 +++ linux-2.6.32.42/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15038 @@ -26,7 +26,7 @@
15039 #include <asm/system.h>
15040 #include <asm/cacheflush.h>
15041
15042 -static void set_idt(void *newidt, __u16 limit)
15043 +static void set_idt(struct desc_struct *newidt, __u16 limit)
15044 {
15045 struct desc_ptr curidt;
15046
15047 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15048 }
15049
15050
15051 -static void set_gdt(void *newgdt, __u16 limit)
15052 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15053 {
15054 struct desc_ptr curgdt;
15055
15056 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15057 }
15058
15059 control_page = page_address(image->control_code_page);
15060 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15061 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15062
15063 relocate_kernel_ptr = control_page;
15064 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15065 diff -urNp linux-2.6.32.42/arch/x86/kernel/microcode_amd.c linux-2.6.32.42/arch/x86/kernel/microcode_amd.c
15066 --- linux-2.6.32.42/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15067 +++ linux-2.6.32.42/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15068 @@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15069 uci->mc = NULL;
15070 }
15071
15072 -static struct microcode_ops microcode_amd_ops = {
15073 +static const struct microcode_ops microcode_amd_ops = {
15074 .request_microcode_user = request_microcode_user,
15075 .request_microcode_fw = request_microcode_fw,
15076 .collect_cpu_info = collect_cpu_info_amd,
15077 @@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15078 .microcode_fini_cpu = microcode_fini_cpu_amd,
15079 };
15080
15081 -struct microcode_ops * __init init_amd_microcode(void)
15082 +const struct microcode_ops * __init init_amd_microcode(void)
15083 {
15084 return &microcode_amd_ops;
15085 }
15086 diff -urNp linux-2.6.32.42/arch/x86/kernel/microcode_core.c linux-2.6.32.42/arch/x86/kernel/microcode_core.c
15087 --- linux-2.6.32.42/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15088 +++ linux-2.6.32.42/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15089 @@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15090
15091 #define MICROCODE_VERSION "2.00"
15092
15093 -static struct microcode_ops *microcode_ops;
15094 +static const struct microcode_ops *microcode_ops;
15095
15096 /*
15097 * Synchronization.
15098 diff -urNp linux-2.6.32.42/arch/x86/kernel/microcode_intel.c linux-2.6.32.42/arch/x86/kernel/microcode_intel.c
15099 --- linux-2.6.32.42/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15100 +++ linux-2.6.32.42/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15101 @@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15102
15103 static int get_ucode_user(void *to, const void *from, size_t n)
15104 {
15105 - return copy_from_user(to, from, n);
15106 + return copy_from_user(to, (__force const void __user *)from, n);
15107 }
15108
15109 static enum ucode_state
15110 request_microcode_user(int cpu, const void __user *buf, size_t size)
15111 {
15112 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15113 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15114 }
15115
15116 static void microcode_fini_cpu(int cpu)
15117 @@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15118 uci->mc = NULL;
15119 }
15120
15121 -static struct microcode_ops microcode_intel_ops = {
15122 +static const struct microcode_ops microcode_intel_ops = {
15123 .request_microcode_user = request_microcode_user,
15124 .request_microcode_fw = request_microcode_fw,
15125 .collect_cpu_info = collect_cpu_info,
15126 @@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15127 .microcode_fini_cpu = microcode_fini_cpu,
15128 };
15129
15130 -struct microcode_ops * __init init_intel_microcode(void)
15131 +const struct microcode_ops * __init init_intel_microcode(void)
15132 {
15133 return &microcode_intel_ops;
15134 }
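The microcode drivers now publish their ops tables as const struct microcode_ops, so the function-pointer tables are emitted into .rodata instead of writable data (the same constification applied to kgdb_arch_ops above and to the dma_map_ops tables further down). A minimal illustration of the pattern:

/* An ops table declared const is placed in .rodata, so its function
 * pointers cannot be silently overwritten at run time; the writable
 * variant lives in .data and can be. Illustrative only. */
struct ops {
	int  (*probe)(void);
	void (*remove)(void);
};

static int  my_probe(void)  { return 0; }
static void my_remove(void) { }

static struct ops writable_ops = { .probe = my_probe, .remove = my_remove };
static const struct ops readonly_ops = { .probe = my_probe, .remove = my_remove };

const struct ops *get_ops(void)
{
	(void)writable_ops;      /* keep the writable example referenced */
	return &readonly_ops;
}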
15135 diff -urNp linux-2.6.32.42/arch/x86/kernel/module.c linux-2.6.32.42/arch/x86/kernel/module.c
15136 --- linux-2.6.32.42/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15137 +++ linux-2.6.32.42/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15138 @@ -34,7 +34,7 @@
15139 #define DEBUGP(fmt...)
15140 #endif
15141
15142 -void *module_alloc(unsigned long size)
15143 +static void *__module_alloc(unsigned long size, pgprot_t prot)
15144 {
15145 struct vm_struct *area;
15146
15147 @@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15148 if (!area)
15149 return NULL;
15150
15151 - return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15152 - PAGE_KERNEL_EXEC);
15153 + return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15154 +}
15155 +
15156 +void *module_alloc(unsigned long size)
15157 +{
15158 +
15159 +#ifdef CONFIG_PAX_KERNEXEC
15160 + return __module_alloc(size, PAGE_KERNEL);
15161 +#else
15162 + return __module_alloc(size, PAGE_KERNEL_EXEC);
15163 +#endif
15164 +
15165 }
15166
15167 /* Free memory returned from module_alloc */
15168 @@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15169 vfree(module_region);
15170 }
15171
15172 +#ifdef CONFIG_PAX_KERNEXEC
15173 +#ifdef CONFIG_X86_32
15174 +void *module_alloc_exec(unsigned long size)
15175 +{
15176 + struct vm_struct *area;
15177 +
15178 + if (size == 0)
15179 + return NULL;
15180 +
15181 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15182 + return area ? area->addr : NULL;
15183 +}
15184 +EXPORT_SYMBOL(module_alloc_exec);
15185 +
15186 +void module_free_exec(struct module *mod, void *module_region)
15187 +{
15188 + vunmap(module_region);
15189 +}
15190 +EXPORT_SYMBOL(module_free_exec);
15191 +#else
15192 +void module_free_exec(struct module *mod, void *module_region)
15193 +{
15194 + module_free(mod, module_region);
15195 +}
15196 +EXPORT_SYMBOL(module_free_exec);
15197 +
15198 +void *module_alloc_exec(unsigned long size)
15199 +{
15200 + return __module_alloc(size, PAGE_KERNEL_RX);
15201 +}
15202 +EXPORT_SYMBOL(module_alloc_exec);
15203 +#endif
15204 +#endif
15205 +
15206 /* We don't need anything special. */
15207 int module_frob_arch_sections(Elf_Ehdr *hdr,
15208 Elf_Shdr *sechdrs,
15209 @@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15210 unsigned int i;
15211 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15212 Elf32_Sym *sym;
15213 - uint32_t *location;
15214 + uint32_t *plocation, location;
15215
15216 DEBUGP("Applying relocate section %u to %u\n", relsec,
15217 sechdrs[relsec].sh_info);
15218 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15219 /* This is where to make the change */
15220 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15221 - + rel[i].r_offset;
15222 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15223 + location = (uint32_t)plocation;
15224 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15225 + plocation = ktla_ktva((void *)plocation);
15226 /* This is the symbol it is referring to. Note that all
15227 undefined symbols have been resolved. */
15228 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15229 @@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15230 switch (ELF32_R_TYPE(rel[i].r_info)) {
15231 case R_386_32:
15232 /* We add the value into the location given */
15233 - *location += sym->st_value;
15234 + pax_open_kernel();
15235 + *plocation += sym->st_value;
15236 + pax_close_kernel();
15237 break;
15238 case R_386_PC32:
15239 /* Add the value, subtract its postition */
15240 - *location += sym->st_value - (uint32_t)location;
15241 + pax_open_kernel();
15242 + *plocation += sym->st_value - location;
15243 + pax_close_kernel();
15244 break;
15245 default:
15246 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15247 @@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15248 case R_X86_64_NONE:
15249 break;
15250 case R_X86_64_64:
15251 + pax_open_kernel();
15252 *(u64 *)loc = val;
15253 + pax_close_kernel();
15254 break;
15255 case R_X86_64_32:
15256 + pax_open_kernel();
15257 *(u32 *)loc = val;
15258 + pax_close_kernel();
15259 if (val != *(u32 *)loc)
15260 goto overflow;
15261 break;
15262 case R_X86_64_32S:
15263 + pax_open_kernel();
15264 *(s32 *)loc = val;
15265 + pax_close_kernel();
15266 if ((s64)val != *(s32 *)loc)
15267 goto overflow;
15268 break;
15269 case R_X86_64_PC32:
15270 val -= (u64)loc;
15271 + pax_open_kernel();
15272 *(u32 *)loc = val;
15273 + pax_close_kernel();
15274 +
15275 #if 0
15276 if ((s64)val != *(s32 *)loc)
15277 goto overflow;
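module.c splits allocations by protection under KERNEXEC: module_alloc() now returns non-executable (PAGE_KERNEL) memory, while module_alloc_exec()/module_free_exec() manage the executable mappings (a dedicated MODULES_EXEC_VADDR window on i386, an RX vmalloc on x86_64), and the relocation writers go through pax_open_kernel()/pax_close_kernel(). A sketch of the x86_64-style split using the __vmalloc(size, gfp, prot) interface of this kernel generation; PAGE_KERNEL_RX is supplied by the patch itself, so treat it as an assumption here:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Illustrative split of module memory by protection. On a vanilla
 * 2.6.32 kernel the closest analogue to PAGE_KERNEL_RX would be
 * PAGE_KERNEL_EXEC; this is a sketch, not the patch's implementation. */
static void *module_alloc_data(unsigned long size)
{
	/* non-executable mapping for module data sections */
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			 PAGE_KERNEL);
}

static void *module_alloc_text(unsigned long size)
{
	/* read+execute mapping for module code sections */
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			 PAGE_KERNEL_RX);
}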
15278 diff -urNp linux-2.6.32.42/arch/x86/kernel/paravirt.c linux-2.6.32.42/arch/x86/kernel/paravirt.c
15279 --- linux-2.6.32.42/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15280 +++ linux-2.6.32.42/arch/x86/kernel/paravirt.c 2011-05-16 21:46:57.000000000 -0400
15281 @@ -122,7 +122,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15282 * corresponding structure. */
15283 static void *get_call_destination(u8 type)
15284 {
15285 - struct paravirt_patch_template tmpl = {
15286 + const struct paravirt_patch_template tmpl = {
15287 .pv_init_ops = pv_init_ops,
15288 .pv_time_ops = pv_time_ops,
15289 .pv_cpu_ops = pv_cpu_ops,
15290 @@ -133,6 +133,9 @@ static void *get_call_destination(u8 typ
15291 .pv_lock_ops = pv_lock_ops,
15292 #endif
15293 };
15294 +
15295 + pax_track_stack();
15296 +
15297 return *((void **)&tmpl + type);
15298 }
15299
15300 @@ -145,14 +148,14 @@ unsigned paravirt_patch_default(u8 type,
15301 if (opfunc == NULL)
15302 /* If there's no function, patch it with a ud2a (BUG) */
15303 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15304 - else if (opfunc == _paravirt_nop)
15305 + else if (opfunc == (void *)_paravirt_nop)
15306 /* If the operation is a nop, then nop the callsite */
15307 ret = paravirt_patch_nop();
15308
15309 /* identity functions just return their single argument */
15310 - else if (opfunc == _paravirt_ident_32)
15311 + else if (opfunc == (void *)_paravirt_ident_32)
15312 ret = paravirt_patch_ident_32(insnbuf, len);
15313 - else if (opfunc == _paravirt_ident_64)
15314 + else if (opfunc == (void *)_paravirt_ident_64)
15315 ret = paravirt_patch_ident_64(insnbuf, len);
15316
15317 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15318 @@ -178,7 +181,7 @@ unsigned paravirt_patch_insns(void *insn
15319 if (insn_len > len || start == NULL)
15320 insn_len = len;
15321 else
15322 - memcpy(insnbuf, start, insn_len);
15323 + memcpy(insnbuf, ktla_ktva(start), insn_len);
15324
15325 return insn_len;
15326 }
15327 @@ -294,22 +297,22 @@ void arch_flush_lazy_mmu_mode(void)
15328 preempt_enable();
15329 }
15330
15331 -struct pv_info pv_info = {
15332 +struct pv_info pv_info __read_only = {
15333 .name = "bare hardware",
15334 .paravirt_enabled = 0,
15335 .kernel_rpl = 0,
15336 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15337 };
15338
15339 -struct pv_init_ops pv_init_ops = {
15340 +struct pv_init_ops pv_init_ops __read_only = {
15341 .patch = native_patch,
15342 };
15343
15344 -struct pv_time_ops pv_time_ops = {
15345 +struct pv_time_ops pv_time_ops __read_only = {
15346 .sched_clock = native_sched_clock,
15347 };
15348
15349 -struct pv_irq_ops pv_irq_ops = {
15350 +struct pv_irq_ops pv_irq_ops __read_only = {
15351 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15352 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15353 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15354 @@ -321,7 +324,7 @@ struct pv_irq_ops pv_irq_ops = {
15355 #endif
15356 };
15357
15358 -struct pv_cpu_ops pv_cpu_ops = {
15359 +struct pv_cpu_ops pv_cpu_ops __read_only = {
15360 .cpuid = native_cpuid,
15361 .get_debugreg = native_get_debugreg,
15362 .set_debugreg = native_set_debugreg,
15363 @@ -382,7 +385,7 @@ struct pv_cpu_ops pv_cpu_ops = {
15364 .end_context_switch = paravirt_nop,
15365 };
15366
15367 -struct pv_apic_ops pv_apic_ops = {
15368 +struct pv_apic_ops pv_apic_ops __read_only = {
15369 #ifdef CONFIG_X86_LOCAL_APIC
15370 .startup_ipi_hook = paravirt_nop,
15371 #endif
15372 @@ -396,7 +399,7 @@ struct pv_apic_ops pv_apic_ops = {
15373 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15374 #endif
15375
15376 -struct pv_mmu_ops pv_mmu_ops = {
15377 +struct pv_mmu_ops pv_mmu_ops __read_only = {
15378
15379 .read_cr2 = native_read_cr2,
15380 .write_cr2 = native_write_cr2,
15381 @@ -467,6 +470,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15382 },
15383
15384 .set_fixmap = native_set_fixmap,
15385 +
15386 +#ifdef CONFIG_PAX_KERNEXEC
15387 + .pax_open_kernel = native_pax_open_kernel,
15388 + .pax_close_kernel = native_pax_close_kernel,
15389 +#endif
15390 +
15391 };
15392
15393 EXPORT_SYMBOL_GPL(pv_time_ops);
15394 diff -urNp linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c
15395 --- linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
15396 +++ linux-2.6.32.42/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
15397 @@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
15398 __raw_spin_lock(lock);
15399 }
15400
15401 -struct pv_lock_ops pv_lock_ops = {
15402 +struct pv_lock_ops pv_lock_ops __read_only = {
15403 #ifdef CONFIG_SMP
15404 .spin_is_locked = __ticket_spin_is_locked,
15405 .spin_is_contended = __ticket_spin_is_contended,
15406 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c
15407 --- linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
15408 +++ linux-2.6.32.42/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
15409 @@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
15410 free_pages((unsigned long)vaddr, get_order(size));
15411 }
15412
15413 -static struct dma_map_ops calgary_dma_ops = {
15414 +static const struct dma_map_ops calgary_dma_ops = {
15415 .alloc_coherent = calgary_alloc_coherent,
15416 .free_coherent = calgary_free_coherent,
15417 .map_sg = calgary_map_sg,
15418 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-dma.c linux-2.6.32.42/arch/x86/kernel/pci-dma.c
15419 --- linux-2.6.32.42/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
15420 +++ linux-2.6.32.42/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
15421 @@ -14,7 +14,7 @@
15422
15423 static int forbid_dac __read_mostly;
15424
15425 -struct dma_map_ops *dma_ops;
15426 +const struct dma_map_ops *dma_ops;
15427 EXPORT_SYMBOL(dma_ops);
15428
15429 static int iommu_sac_force __read_mostly;
15430 @@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
15431
15432 int dma_supported(struct device *dev, u64 mask)
15433 {
15434 - struct dma_map_ops *ops = get_dma_ops(dev);
15435 + const struct dma_map_ops *ops = get_dma_ops(dev);
15436
15437 #ifdef CONFIG_PCI
15438 if (mask > 0xffffffff && forbid_dac > 0) {
15439 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c
15440 --- linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
15441 +++ linux-2.6.32.42/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
15442 @@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
15443 return -1;
15444 }
15445
15446 -static struct dma_map_ops gart_dma_ops = {
15447 +static const struct dma_map_ops gart_dma_ops = {
15448 .map_sg = gart_map_sg,
15449 .unmap_sg = gart_unmap_sg,
15450 .map_page = gart_map_page,
15451 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-nommu.c linux-2.6.32.42/arch/x86/kernel/pci-nommu.c
15452 --- linux-2.6.32.42/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
15453 +++ linux-2.6.32.42/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
15454 @@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
15455 flush_write_buffers();
15456 }
15457
15458 -struct dma_map_ops nommu_dma_ops = {
15459 +const struct dma_map_ops nommu_dma_ops = {
15460 .alloc_coherent = dma_generic_alloc_coherent,
15461 .free_coherent = nommu_free_coherent,
15462 .map_sg = nommu_map_sg,
15463 diff -urNp linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c
15464 --- linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
15465 +++ linux-2.6.32.42/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
15466 @@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
15467 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
15468 }
15469
15470 -static struct dma_map_ops swiotlb_dma_ops = {
15471 +static const struct dma_map_ops swiotlb_dma_ops = {
15472 .mapping_error = swiotlb_dma_mapping_error,
15473 .alloc_coherent = x86_swiotlb_alloc_coherent,
15474 .free_coherent = swiotlb_free_coherent,
15475 diff -urNp linux-2.6.32.42/arch/x86/kernel/process_32.c linux-2.6.32.42/arch/x86/kernel/process_32.c
15476 --- linux-2.6.32.42/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
15477 +++ linux-2.6.32.42/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
15478 @@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
15479 unsigned long thread_saved_pc(struct task_struct *tsk)
15480 {
15481 return ((unsigned long *)tsk->thread.sp)[3];
15482 +//XXX return tsk->thread.eip;
15483 }
15484
15485 #ifndef CONFIG_SMP
15486 @@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
15487 unsigned short ss, gs;
15488 const char *board;
15489
15490 - if (user_mode_vm(regs)) {
15491 + if (user_mode(regs)) {
15492 sp = regs->sp;
15493 ss = regs->ss & 0xffff;
15494 - gs = get_user_gs(regs);
15495 } else {
15496 sp = (unsigned long) (&regs->sp);
15497 savesegment(ss, ss);
15498 - savesegment(gs, gs);
15499 }
15500 + gs = get_user_gs(regs);
15501
15502 printk("\n");
15503
15504 @@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
15505 regs.bx = (unsigned long) fn;
15506 regs.dx = (unsigned long) arg;
15507
15508 - regs.ds = __USER_DS;
15509 - regs.es = __USER_DS;
15510 + regs.ds = __KERNEL_DS;
15511 + regs.es = __KERNEL_DS;
15512 regs.fs = __KERNEL_PERCPU;
15513 - regs.gs = __KERNEL_STACK_CANARY;
15514 + savesegment(gs, regs.gs);
15515 regs.orig_ax = -1;
15516 regs.ip = (unsigned long) kernel_thread_helper;
15517 regs.cs = __KERNEL_CS | get_kernel_rpl();
15518 @@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
15519 struct task_struct *tsk;
15520 int err;
15521
15522 - childregs = task_pt_regs(p);
15523 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
15524 *childregs = *regs;
15525 childregs->ax = 0;
15526 childregs->sp = sp;
15527
15528 p->thread.sp = (unsigned long) childregs;
15529 p->thread.sp0 = (unsigned long) (childregs+1);
15530 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15531
15532 p->thread.ip = (unsigned long) ret_from_fork;
15533
15534 @@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
15535 struct thread_struct *prev = &prev_p->thread,
15536 *next = &next_p->thread;
15537 int cpu = smp_processor_id();
15538 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
15539 + struct tss_struct *tss = init_tss + cpu;
15540 bool preload_fpu;
15541
15542 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
15543 @@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
15544 */
15545 lazy_save_gs(prev->gs);
15546
15547 +#ifdef CONFIG_PAX_MEMORY_UDEREF
15548 + __set_fs(task_thread_info(next_p)->addr_limit);
15549 +#endif
15550 +
15551 /*
15552 * Load the per-thread Thread-Local Storage descriptor.
15553 */
15554 @@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
15555 */
15556 arch_end_context_switch(next_p);
15557
15558 + percpu_write(current_task, next_p);
15559 + percpu_write(current_tinfo, &next_p->tinfo);
15560 +
15561 if (preload_fpu)
15562 __math_state_restore();
15563
15564 @@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
15565 if (prev->gs | next->gs)
15566 lazy_load_gs(next->gs);
15567
15568 - percpu_write(current_task, next_p);
15569 -
15570 return prev_p;
15571 }
15572
15573 @@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
15574 } while (count++ < 16);
15575 return 0;
15576 }
15577 -
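In copy_thread() the child's register frame is now placed by hand: pt_regs sits at the top of the stack pages minus an 8-byte pad (16 bytes in the x86_64 version below), thread.sp0 points just above it, and lowest_stack is recorded for the stack-sanitising code elsewhere in the patch. A small arithmetic sketch of that layout (stand-in sizes, illustrative only):

#include <stdio.h>

#define THREAD_SIZE (8 * 1024UL)                  /* stand-in for the kernel constant */

struct pt_regs_sketch { unsigned long r[17]; };   /* stand-in for struct pt_regs */

int main(void)
{
	unsigned long stack_page = 0xc1000000UL;  /* pretend task_stack_page(p) */
	unsigned long childregs  = stack_page + THREAD_SIZE
				   - sizeof(struct pt_regs_sketch) - 8;
	unsigned long sp0        = childregs + sizeof(struct pt_regs_sketch);

	printf("stack page : %#lx\n", stack_page);
	printf("childregs  : %#lx  (top - sizeof(pt_regs) - 8)\n", childregs);
	printf("thread.sp0 : %#lx  (childregs + 1)\n", sp0);
	return 0;
}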
15578 diff -urNp linux-2.6.32.42/arch/x86/kernel/process_64.c linux-2.6.32.42/arch/x86/kernel/process_64.c
15579 --- linux-2.6.32.42/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
15580 +++ linux-2.6.32.42/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
15581 @@ -91,7 +91,7 @@ static void __exit_idle(void)
15582 void exit_idle(void)
15583 {
15584 /* idle loop has pid 0 */
15585 - if (current->pid)
15586 + if (task_pid_nr(current))
15587 return;
15588 __exit_idle();
15589 }
15590 @@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
15591 if (!board)
15592 board = "";
15593 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
15594 - current->pid, current->comm, print_tainted(),
15595 + task_pid_nr(current), current->comm, print_tainted(),
15596 init_utsname()->release,
15597 (int)strcspn(init_utsname()->version, " "),
15598 init_utsname()->version, board);
15599 @@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
15600 struct pt_regs *childregs;
15601 struct task_struct *me = current;
15602
15603 - childregs = ((struct pt_regs *)
15604 - (THREAD_SIZE + task_stack_page(p))) - 1;
15605 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
15606 *childregs = *regs;
15607
15608 childregs->ax = 0;
15609 @@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
15610 p->thread.sp = (unsigned long) childregs;
15611 p->thread.sp0 = (unsigned long) (childregs+1);
15612 p->thread.usersp = me->thread.usersp;
15613 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15614
15615 set_tsk_thread_flag(p, TIF_FORK);
15616
15617 @@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
15618 struct thread_struct *prev = &prev_p->thread;
15619 struct thread_struct *next = &next_p->thread;
15620 int cpu = smp_processor_id();
15621 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
15622 + struct tss_struct *tss = init_tss + cpu;
15623 unsigned fsindex, gsindex;
15624 bool preload_fpu;
15625
15626 @@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
15627 prev->usersp = percpu_read(old_rsp);
15628 percpu_write(old_rsp, next->usersp);
15629 percpu_write(current_task, next_p);
15630 + percpu_write(current_tinfo, &next_p->tinfo);
15631
15632 - percpu_write(kernel_stack,
15633 - (unsigned long)task_stack_page(next_p) +
15634 - THREAD_SIZE - KERNEL_STACK_OFFSET);
15635 + percpu_write(kernel_stack, next->sp0);
15636
15637 /*
15638 * Now maybe reload the debug registers and handle I/O bitmaps
15639 @@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
15640 if (!p || p == current || p->state == TASK_RUNNING)
15641 return 0;
15642 stack = (unsigned long)task_stack_page(p);
15643 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15644 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15645 return 0;
15646 fp = *(u64 *)(p->thread.sp);
15647 do {
15648 - if (fp < (unsigned long)stack ||
15649 - fp >= (unsigned long)stack+THREAD_SIZE)
15650 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15651 return 0;
15652 ip = *(u64 *)(fp+8);
15653 if (!in_sched_functions(ip))
15654 diff -urNp linux-2.6.32.42/arch/x86/kernel/process.c linux-2.6.32.42/arch/x86/kernel/process.c
15655 --- linux-2.6.32.42/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
15656 +++ linux-2.6.32.42/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
15657 @@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
15658
15659 void free_thread_info(struct thread_info *ti)
15660 {
15661 - free_thread_xstate(ti->task);
15662 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
15663 }
15664
15665 +static struct kmem_cache *task_struct_cachep;
15666 +
15667 void arch_task_cache_init(void)
15668 {
15669 - task_xstate_cachep =
15670 - kmem_cache_create("task_xstate", xstate_size,
15671 + /* create a slab on which task_structs can be allocated */
15672 + task_struct_cachep =
15673 + kmem_cache_create("task_struct", sizeof(struct task_struct),
15674 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
15675 +
15676 + task_xstate_cachep =
15677 + kmem_cache_create("task_xstate", xstate_size,
15678 __alignof__(union thread_xstate),
15679 - SLAB_PANIC | SLAB_NOTRACK, NULL);
15680 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
15681 +}
15682 +
15683 +struct task_struct *alloc_task_struct(void)
15684 +{
15685 + return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
15686 +}
15687 +
15688 +void free_task_struct(struct task_struct *task)
15689 +{
15690 + free_thread_xstate(task);
15691 + kmem_cache_free(task_struct_cachep, task);
15692 }
15693
15694 /*
15695 @@ -73,7 +90,7 @@ void exit_thread(void)
15696 unsigned long *bp = t->io_bitmap_ptr;
15697
15698 if (bp) {
15699 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
15700 + struct tss_struct *tss = init_tss + get_cpu();
15701
15702 t->io_bitmap_ptr = NULL;
15703 clear_thread_flag(TIF_IO_BITMAP);
15704 @@ -93,6 +110,9 @@ void flush_thread(void)
15705
15706 clear_tsk_thread_flag(tsk, TIF_DEBUG);
15707
15708 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15709 + loadsegment(gs, 0);
15710 +#endif
15711 tsk->thread.debugreg0 = 0;
15712 tsk->thread.debugreg1 = 0;
15713 tsk->thread.debugreg2 = 0;
15714 @@ -307,7 +327,7 @@ void default_idle(void)
15715 EXPORT_SYMBOL(default_idle);
15716 #endif
15717
15718 -void stop_this_cpu(void *dummy)
15719 +__noreturn void stop_this_cpu(void *dummy)
15720 {
15721 local_irq_disable();
15722 /*
15723 @@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
15724 }
15725 early_param("idle", idle_setup);
15726
15727 -unsigned long arch_align_stack(unsigned long sp)
15728 +#ifdef CONFIG_PAX_RANDKSTACK
15729 +asmlinkage void pax_randomize_kstack(void)
15730 {
15731 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
15732 - sp -= get_random_int() % 8192;
15733 - return sp & ~0xf;
15734 -}
15735 + struct thread_struct *thread = &current->thread;
15736 + unsigned long time;
15737
15738 -unsigned long arch_randomize_brk(struct mm_struct *mm)
15739 -{
15740 - unsigned long range_end = mm->brk + 0x02000000;
15741 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
15742 + if (!randomize_va_space)
15743 + return;
15744 +
15745 + rdtscl(time);
15746 +
15747 + /* P4 seems to return a 0 LSB, ignore it */
15748 +#ifdef CONFIG_MPENTIUM4
15749 + time &= 0x3EUL;
15750 + time <<= 2;
15751 +#elif defined(CONFIG_X86_64)
15752 + time &= 0xFUL;
15753 + time <<= 4;
15754 +#else
15755 + time &= 0x1FUL;
15756 + time <<= 3;
15757 +#endif
15758 +
15759 + thread->sp0 ^= time;
15760 + load_sp0(init_tss + smp_processor_id(), thread);
15761 +
15762 +#ifdef CONFIG_X86_64
15763 + percpu_write(kernel_stack, thread->sp0);
15764 +#endif
15765 }
15766 +#endif
15767
15768 diff -urNp linux-2.6.32.42/arch/x86/kernel/ptrace.c linux-2.6.32.42/arch/x86/kernel/ptrace.c
15769 --- linux-2.6.32.42/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
15770 +++ linux-2.6.32.42/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
15771 @@ -925,7 +925,7 @@ static const struct user_regset_view use
15772 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
15773 {
15774 int ret;
15775 - unsigned long __user *datap = (unsigned long __user *)data;
15776 + unsigned long __user *datap = (__force unsigned long __user *)data;
15777
15778 switch (request) {
15779 /* read the word at location addr in the USER area. */
15780 @@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
15781 if (addr < 0)
15782 return -EIO;
15783 ret = do_get_thread_area(child, addr,
15784 - (struct user_desc __user *) data);
15785 + (__force struct user_desc __user *) data);
15786 break;
15787
15788 case PTRACE_SET_THREAD_AREA:
15789 if (addr < 0)
15790 return -EIO;
15791 ret = do_set_thread_area(child, addr,
15792 - (struct user_desc __user *) data, 0);
15793 + (__force struct user_desc __user *) data, 0);
15794 break;
15795 #endif
15796
15797 @@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
15798 #ifdef CONFIG_X86_PTRACE_BTS
15799 case PTRACE_BTS_CONFIG:
15800 ret = ptrace_bts_config
15801 - (child, data, (struct ptrace_bts_config __user *)addr);
15802 + (child, data, (__force struct ptrace_bts_config __user *)addr);
15803 break;
15804
15805 case PTRACE_BTS_STATUS:
15806 ret = ptrace_bts_status
15807 - (child, data, (struct ptrace_bts_config __user *)addr);
15808 + (child, data, (__force struct ptrace_bts_config __user *)addr);
15809 break;
15810
15811 case PTRACE_BTS_SIZE:
15812 @@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
15813
15814 case PTRACE_BTS_GET:
15815 ret = ptrace_bts_read_record
15816 - (child, data, (struct bts_struct __user *) addr);
15817 + (child, data, (__force struct bts_struct __user *) addr);
15818 break;
15819
15820 case PTRACE_BTS_CLEAR:
15821 @@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
15822
15823 case PTRACE_BTS_DRAIN:
15824 ret = ptrace_bts_drain
15825 - (child, data, (struct bts_struct __user *) addr);
15826 + (child, data, (__force struct bts_struct __user *) addr);
15827 break;
15828 #endif /* CONFIG_X86_PTRACE_BTS */
15829
15830 @@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
15831 info.si_code = si_code;
15832
15833 /* User-mode ip? */
15834 - info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
15835 + info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
15836
15837 /* Send us the fake SIGTRAP */
15838 force_sig_info(SIGTRAP, &info, tsk);
15839 @@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
15840 * We must return the syscall number to actually look up in the table.
15841 * This can be -1L to skip running any syscall at all.
15842 */
15843 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
15844 +long syscall_trace_enter(struct pt_regs *regs)
15845 {
15846 long ret = 0;
15847
15848 @@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
15849 return ret ?: regs->orig_ax;
15850 }
15851
15852 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
15853 +void syscall_trace_leave(struct pt_regs *regs)
15854 {
15855 if (unlikely(current->audit_context))
15856 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
15857 diff -urNp linux-2.6.32.42/arch/x86/kernel/reboot.c linux-2.6.32.42/arch/x86/kernel/reboot.c
15858 --- linux-2.6.32.42/arch/x86/kernel/reboot.c 2011-03-27 14:31:47.000000000 -0400
15859 +++ linux-2.6.32.42/arch/x86/kernel/reboot.c 2011-05-22 23:02:03.000000000 -0400
15860 @@ -33,7 +33,7 @@ void (*pm_power_off)(void);
15861 EXPORT_SYMBOL(pm_power_off);
15862
15863 static const struct desc_ptr no_idt = {};
15864 -static int reboot_mode;
15865 +static unsigned short reboot_mode;
15866 enum reboot_type reboot_type = BOOT_KBD;
15867 int reboot_force;
15868
15869 @@ -292,12 +292,12 @@ core_initcall(reboot_init);
15870 controller to pulse the CPU reset line, which is more thorough, but
15871 doesn't work with at least one type of 486 motherboard. It is easy
15872 to stop this code working; hence the copious comments. */
15873 -static const unsigned long long
15874 -real_mode_gdt_entries [3] =
15875 +static struct desc_struct
15876 +real_mode_gdt_entries [3] __read_only =
15877 {
15878 - 0x0000000000000000ULL, /* Null descriptor */
15879 - 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
15880 - 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
15881 + GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
15882 + GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
15883 + GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
15884 };
15885
15886 static const struct desc_ptr
15887 @@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
15888 * specified by the code and length parameters.
15889 * We assume that length will aways be less that 100!
15890 */
15891 -void machine_real_restart(const unsigned char *code, int length)
15892 +__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
15893 {
15894 local_irq_disable();
15895
15896 @@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
15897 /* Remap the kernel at virtual address zero, as well as offset zero
15898 from the kernel segment. This assumes the kernel segment starts at
15899 virtual address PAGE_OFFSET. */
15900 - memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15901 - sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
15902 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
15903 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
15904
15905 /*
15906 * Use `swapper_pg_dir' as our page directory.
15907 @@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
15908 boot)". This seems like a fairly standard thing that gets set by
15909 REBOOT.COM programs, and the previous reset routine did this
15910 too. */
15911 - *((unsigned short *)0x472) = reboot_mode;
15912 + *(unsigned short *)(__va(0x472)) = reboot_mode;
15913
15914 /* For the switch to real mode, copy some code to low memory. It has
15915 to be in the first 64k because it is running in 16-bit mode, and it
15916 has to have the same physical and virtual address, because it turns
15917 off paging. Copy it near the end of the first page, out of the way
15918 of BIOS variables. */
15919 - memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
15920 - real_mode_switch, sizeof (real_mode_switch));
15921 - memcpy((void *)(0x1000 - 100), code, length);
15922 + memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
15923 + memcpy(__va(0x1000 - 100), code, length);
15924
15925 /* Set up the IDT for real mode. */
15926 load_idt(&real_mode_idt);
15927 @@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
15928 __asm__ __volatile__ ("ljmp $0x0008,%0"
15929 :
15930 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
15931 + do { } while (1);
15932 }
15933 #ifdef CONFIG_APM_MODULE
15934 EXPORT_SYMBOL(machine_real_restart);
15935 @@ -536,7 +536,7 @@ void __attribute__((weak)) mach_reboot_f
15936 {
15937 }
15938
15939 -static void native_machine_emergency_restart(void)
15940 +__noreturn static void native_machine_emergency_restart(void)
15941 {
15942 int i;
15943
15944 @@ -651,13 +651,13 @@ void native_machine_shutdown(void)
15945 #endif
15946 }
15947
15948 -static void __machine_emergency_restart(int emergency)
15949 +static __noreturn void __machine_emergency_restart(int emergency)
15950 {
15951 reboot_emergency = emergency;
15952 machine_ops.emergency_restart();
15953 }
15954
15955 -static void native_machine_restart(char *__unused)
15956 +static __noreturn void native_machine_restart(char *__unused)
15957 {
15958 printk("machine restart\n");
15959
15960 @@ -666,7 +666,7 @@ static void native_machine_restart(char
15961 __machine_emergency_restart(0);
15962 }
15963
15964 -static void native_machine_halt(void)
15965 +static __noreturn void native_machine_halt(void)
15966 {
15967 /* stop other cpus and apics */
15968 machine_shutdown();
15969 @@ -677,7 +677,7 @@ static void native_machine_halt(void)
15970 stop_this_cpu(NULL);
15971 }
15972
15973 -static void native_machine_power_off(void)
15974 +__noreturn static void native_machine_power_off(void)
15975 {
15976 if (pm_power_off) {
15977 if (!reboot_force)
15978 @@ -686,6 +686,7 @@ static void native_machine_power_off(voi
15979 }
15980 /* a fallback in case there is no PM info available */
15981 tboot_shutdown(TB_SHUTDOWN_HALT);
15982 + do { } while (1);
15983 }
15984
15985 struct machine_ops machine_ops = {
15986 diff -urNp linux-2.6.32.42/arch/x86/kernel/setup.c linux-2.6.32.42/arch/x86/kernel/setup.c
15987 --- linux-2.6.32.42/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
15988 +++ linux-2.6.32.42/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
15989 @@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
15990
15991 if (!boot_params.hdr.root_flags)
15992 root_mountflags &= ~MS_RDONLY;
15993 - init_mm.start_code = (unsigned long) _text;
15994 - init_mm.end_code = (unsigned long) _etext;
15995 + init_mm.start_code = ktla_ktva((unsigned long) _text);
15996 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
15997 init_mm.end_data = (unsigned long) _edata;
15998 init_mm.brk = _brk_end;
15999
16000 - code_resource.start = virt_to_phys(_text);
16001 - code_resource.end = virt_to_phys(_etext)-1;
16002 - data_resource.start = virt_to_phys(_etext);
16003 + code_resource.start = virt_to_phys(ktla_ktva(_text));
16004 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16005 + data_resource.start = virt_to_phys(_sdata);
16006 data_resource.end = virt_to_phys(_edata)-1;
16007 bss_resource.start = virt_to_phys(&__bss_start);
16008 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16009 diff -urNp linux-2.6.32.42/arch/x86/kernel/setup_percpu.c linux-2.6.32.42/arch/x86/kernel/setup_percpu.c
16010 --- linux-2.6.32.42/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16011 +++ linux-2.6.32.42/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16012 @@ -25,19 +25,17 @@
16013 # define DBG(x...)
16014 #endif
16015
16016 -DEFINE_PER_CPU(int, cpu_number);
16017 +#ifdef CONFIG_SMP
16018 +DEFINE_PER_CPU(unsigned int, cpu_number);
16019 EXPORT_PER_CPU_SYMBOL(cpu_number);
16020 +#endif
16021
16022 -#ifdef CONFIG_X86_64
16023 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16024 -#else
16025 -#define BOOT_PERCPU_OFFSET 0
16026 -#endif
16027
16028 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16029 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16030
16031 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16032 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16033 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16034 };
16035 EXPORT_SYMBOL(__per_cpu_offset);
16036 @@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16037 {
16038 #ifdef CONFIG_X86_32
16039 struct desc_struct gdt;
16040 + unsigned long base = per_cpu_offset(cpu);
16041
16042 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16043 - 0x2 | DESCTYPE_S, 0x8);
16044 - gdt.s = 1;
16045 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16046 + 0x83 | DESCTYPE_S, 0xC);
16047 write_gdt_entry(get_cpu_gdt_table(cpu),
16048 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16049 #endif
16050 @@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16051 /* alrighty, percpu areas up and running */
16052 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16053 for_each_possible_cpu(cpu) {
16054 +#ifdef CONFIG_CC_STACKPROTECTOR
16055 +#ifdef CONFIG_X86_32
16056 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
16057 +#endif
16058 +#endif
16059 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16060 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16061 per_cpu(cpu_number, cpu) = cpu;
16062 @@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16063 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16064 #endif
16065 #endif
16066 +#ifdef CONFIG_CC_STACKPROTECTOR
16067 +#ifdef CONFIG_X86_32
16068 + if (!cpu)
16069 + per_cpu(stack_canary.canary, cpu) = canary;
16070 +#endif
16071 +#endif
16072 /*
16073 * Up to this point, the boot CPU has been using .data.init
16074 * area. Reload any changed state for the boot CPU.
16075 diff -urNp linux-2.6.32.42/arch/x86/kernel/signal.c linux-2.6.32.42/arch/x86/kernel/signal.c
16076 --- linux-2.6.32.42/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16077 +++ linux-2.6.32.42/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16078 @@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16079 * Align the stack pointer according to the i386 ABI,
16080 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16081 */
16082 - sp = ((sp + 4) & -16ul) - 4;
16083 + sp = ((sp - 12) & -16ul) - 4;
16084 #else /* !CONFIG_X86_32 */
16085 sp = round_down(sp, 16) - 8;
16086 #endif
16087 @@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16088 * Return an always-bogus address instead so we will die with SIGSEGV.
16089 */
16090 if (onsigstack && !likely(on_sig_stack(sp)))
16091 - return (void __user *)-1L;
16092 + return (__force void __user *)-1L;
16093
16094 /* save i387 state */
16095 if (used_math() && save_i387_xstate(*fpstate) < 0)
16096 - return (void __user *)-1L;
16097 + return (__force void __user *)-1L;
16098
16099 return (void __user *)sp;
16100 }
16101 @@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16102 }
16103
16104 if (current->mm->context.vdso)
16105 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16106 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16107 else
16108 - restorer = &frame->retcode;
16109 + restorer = (void __user *)&frame->retcode;
16110 if (ka->sa.sa_flags & SA_RESTORER)
16111 restorer = ka->sa.sa_restorer;
16112
16113 @@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16114 * reasons and because gdb uses it as a signature to notice
16115 * signal handler stack frames.
16116 */
16117 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16118 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16119
16120 if (err)
16121 return -EFAULT;
16122 @@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16123 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16124
16125 /* Set up to return from userspace. */
16126 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16127 + if (current->mm->context.vdso)
16128 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16129 + else
16130 + restorer = (void __user *)&frame->retcode;
16131 if (ka->sa.sa_flags & SA_RESTORER)
16132 restorer = ka->sa.sa_restorer;
16133 put_user_ex(restorer, &frame->pretcode);
16134 @@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16135 * reasons and because gdb uses it as a signature to notice
16136 * signal handler stack frames.
16137 */
16138 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16139 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16140 } put_user_catch(err);
16141
16142 if (err)
16143 @@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16144 int signr;
16145 sigset_t *oldset;
16146
16147 + pax_track_stack();
16148 +
16149 /*
16150 * We want the common case to go fast, which is why we may in certain
16151 * cases get here from kernel mode. Just return without doing anything
16152 @@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16153 * X86_32: vm86 regs switched out by assembly code before reaching
16154 * here, so testing against kernel CS suffices.
16155 */
16156 - if (!user_mode(regs))
16157 + if (!user_mode_novm(regs))
16158 return;
16159
16160 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
16161 diff -urNp linux-2.6.32.42/arch/x86/kernel/smpboot.c linux-2.6.32.42/arch/x86/kernel/smpboot.c
16162 --- linux-2.6.32.42/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16163 +++ linux-2.6.32.42/arch/x86/kernel/smpboot.c 2011-05-11 18:25:15.000000000 -0400
16164 @@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16165 */
16166 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16167
16168 -void cpu_hotplug_driver_lock()
16169 +void cpu_hotplug_driver_lock(void)
16170 {
16171 - mutex_lock(&x86_cpu_hotplug_driver_mutex);
16172 + mutex_lock(&x86_cpu_hotplug_driver_mutex);
16173 }
16174
16175 -void cpu_hotplug_driver_unlock()
16176 +void cpu_hotplug_driver_unlock(void)
16177 {
16178 - mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16179 + mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16180 }
16181
16182 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16183 @@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16184 set_idle_for_cpu(cpu, c_idle.idle);
16185 do_rest:
16186 per_cpu(current_task, cpu) = c_idle.idle;
16187 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16188 #ifdef CONFIG_X86_32
16189 /* Stack for startup_32 can be just as for start_secondary onwards */
16190 irq_ctx_init(cpu);
16191 @@ -750,11 +751,13 @@ do_rest:
16192 #else
16193 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16194 initial_gs = per_cpu_offset(cpu);
16195 - per_cpu(kernel_stack, cpu) =
16196 - (unsigned long)task_stack_page(c_idle.idle) -
16197 - KERNEL_STACK_OFFSET + THREAD_SIZE;
16198 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16199 #endif
16200 +
16201 + pax_open_kernel();
16202 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16203 + pax_close_kernel();
16204 +
16205 initial_code = (unsigned long)start_secondary;
16206 stack_start.sp = (void *) c_idle.idle->thread.sp;
16207
16208 @@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16209
16210 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16211
16212 +#ifdef CONFIG_PAX_PER_CPU_PGD
16213 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16214 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16215 + KERNEL_PGD_PTRS);
16216 +#endif
16217 +
16218 err = do_boot_cpu(apicid, cpu);
16219
16220 if (err) {
16221 diff -urNp linux-2.6.32.42/arch/x86/kernel/step.c linux-2.6.32.42/arch/x86/kernel/step.c
16222 --- linux-2.6.32.42/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16223 +++ linux-2.6.32.42/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16224 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16225 struct desc_struct *desc;
16226 unsigned long base;
16227
16228 - seg &= ~7UL;
16229 + seg >>= 3;
16230
16231 mutex_lock(&child->mm->context.lock);
16232 - if (unlikely((seg >> 3) >= child->mm->context.size))
16233 + if (unlikely(seg >= child->mm->context.size))
16234 addr = -1L; /* bogus selector, access would fault */
16235 else {
16236 desc = child->mm->context.ldt + seg;
16237 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16238 addr += base;
16239 }
16240 mutex_unlock(&child->mm->context.lock);
16241 - }
16242 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16243 + addr = ktla_ktva(addr);
16244
16245 return addr;
16246 }
16247 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16248 unsigned char opcode[15];
16249 unsigned long addr = convert_ip_to_linear(child, regs);
16250
16251 + if (addr == -EINVAL)
16252 + return 0;
16253 +
16254 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16255 for (i = 0; i < copied; i++) {
16256 switch (opcode[i]) {
16257 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16258
16259 #ifdef CONFIG_X86_64
16260 case 0x40 ... 0x4f:
16261 - if (regs->cs != __USER_CS)
16262 + if ((regs->cs & 0xffff) != __USER_CS)
16263 /* 32-bit mode: register increment */
16264 return 0;
16265 /* 64-bit mode: REX prefix */
16266 diff -urNp linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S
16267 --- linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16268 +++ linux-2.6.32.42/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16269 @@ -1,3 +1,4 @@
16270 +.section .rodata,"a",@progbits
16271 ENTRY(sys_call_table)
16272 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16273 .long sys_exit
16274 diff -urNp linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c
16275 --- linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16276 +++ linux-2.6.32.42/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16277 @@ -24,6 +24,21 @@
16278
16279 #include <asm/syscalls.h>
16280
16281 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16282 +{
16283 + unsigned long pax_task_size = TASK_SIZE;
16284 +
16285 +#ifdef CONFIG_PAX_SEGMEXEC
16286 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16287 + pax_task_size = SEGMEXEC_TASK_SIZE;
16288 +#endif
16289 +
16290 + if (len > pax_task_size || addr > pax_task_size - len)
16291 + return -EINVAL;
16292 +
16293 + return 0;
16294 +}
16295 +
16296 /*
16297 * Perform the select(nd, in, out, ex, tv) and mmap() system
16298 * calls. Linux/i386 didn't use to be able to handle more than
16299 @@ -58,6 +73,212 @@ out:
16300 return err;
16301 }
16302
16303 +unsigned long
16304 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
16305 + unsigned long len, unsigned long pgoff, unsigned long flags)
16306 +{
16307 + struct mm_struct *mm = current->mm;
16308 + struct vm_area_struct *vma;
16309 + unsigned long start_addr, pax_task_size = TASK_SIZE;
16310 +
16311 +#ifdef CONFIG_PAX_SEGMEXEC
16312 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16313 + pax_task_size = SEGMEXEC_TASK_SIZE;
16314 +#endif
16315 +
16316 + pax_task_size -= PAGE_SIZE;
16317 +
16318 + if (len > pax_task_size)
16319 + return -ENOMEM;
16320 +
16321 + if (flags & MAP_FIXED)
16322 + return addr;
16323 +
16324 +#ifdef CONFIG_PAX_RANDMMAP
16325 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16326 +#endif
16327 +
16328 + if (addr) {
16329 + addr = PAGE_ALIGN(addr);
16330 + if (pax_task_size - len >= addr) {
16331 + vma = find_vma(mm, addr);
16332 + if (check_heap_stack_gap(vma, addr, len))
16333 + return addr;
16334 + }
16335 + }
16336 + if (len > mm->cached_hole_size) {
16337 + start_addr = addr = mm->free_area_cache;
16338 + } else {
16339 + start_addr = addr = mm->mmap_base;
16340 + mm->cached_hole_size = 0;
16341 + }
16342 +
16343 +#ifdef CONFIG_PAX_PAGEEXEC
16344 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16345 + start_addr = 0x00110000UL;
16346 +
16347 +#ifdef CONFIG_PAX_RANDMMAP
16348 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16349 + start_addr += mm->delta_mmap & 0x03FFF000UL;
16350 +#endif
16351 +
16352 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16353 + start_addr = addr = mm->mmap_base;
16354 + else
16355 + addr = start_addr;
16356 + }
16357 +#endif
16358 +
16359 +full_search:
16360 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16361 + /* At this point: (!vma || addr < vma->vm_end). */
16362 + if (pax_task_size - len < addr) {
16363 + /*
16364 + * Start a new search - just in case we missed
16365 + * some holes.
16366 + */
16367 + if (start_addr != mm->mmap_base) {
16368 + start_addr = addr = mm->mmap_base;
16369 + mm->cached_hole_size = 0;
16370 + goto full_search;
16371 + }
16372 + return -ENOMEM;
16373 + }
16374 + if (check_heap_stack_gap(vma, addr, len))
16375 + break;
16376 + if (addr + mm->cached_hole_size < vma->vm_start)
16377 + mm->cached_hole_size = vma->vm_start - addr;
16378 + addr = vma->vm_end;
16379 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
16380 + start_addr = addr = mm->mmap_base;
16381 + mm->cached_hole_size = 0;
16382 + goto full_search;
16383 + }
16384 + }
16385 +
16386 + /*
16387 + * Remember the place where we stopped the search:
16388 + */
16389 + mm->free_area_cache = addr + len;
16390 + return addr;
16391 +}
16392 +
16393 +unsigned long
16394 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16395 + const unsigned long len, const unsigned long pgoff,
16396 + const unsigned long flags)
16397 +{
16398 + struct vm_area_struct *vma;
16399 + struct mm_struct *mm = current->mm;
16400 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16401 +
16402 +#ifdef CONFIG_PAX_SEGMEXEC
16403 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16404 + pax_task_size = SEGMEXEC_TASK_SIZE;
16405 +#endif
16406 +
16407 + pax_task_size -= PAGE_SIZE;
16408 +
16409 + /* requested length too big for entire address space */
16410 + if (len > pax_task_size)
16411 + return -ENOMEM;
16412 +
16413 + if (flags & MAP_FIXED)
16414 + return addr;
16415 +
16416 +#ifdef CONFIG_PAX_PAGEEXEC
16417 + if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16418 + goto bottomup;
16419 +#endif
16420 +
16421 +#ifdef CONFIG_PAX_RANDMMAP
16422 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16423 +#endif
16424 +
16425 + /* requesting a specific address */
16426 + if (addr) {
16427 + addr = PAGE_ALIGN(addr);
16428 + if (pax_task_size - len >= addr) {
16429 + vma = find_vma(mm, addr);
16430 + if (check_heap_stack_gap(vma, addr, len))
16431 + return addr;
16432 + }
16433 + }
16434 +
16435 + /* check if free_area_cache is useful for us */
16436 + if (len <= mm->cached_hole_size) {
16437 + mm->cached_hole_size = 0;
16438 + mm->free_area_cache = mm->mmap_base;
16439 + }
16440 +
16441 + /* either no address requested or can't fit in requested address hole */
16442 + addr = mm->free_area_cache;
16443 +
16444 + /* make sure it can fit in the remaining address space */
16445 + if (addr > len) {
16446 + vma = find_vma(mm, addr-len);
16447 + if (check_heap_stack_gap(vma, addr - len, len))
16448 + /* remember the address as a hint for next time */
16449 + return (mm->free_area_cache = addr-len);
16450 + }
16451 +
16452 + if (mm->mmap_base < len)
16453 + goto bottomup;
16454 +
16455 + addr = mm->mmap_base-len;
16456 +
16457 + do {
16458 + /*
16459 + * Lookup failure means no vma is above this address,
16460 + * else if new region fits below vma->vm_start,
16461 + * return with success:
16462 + */
16463 + vma = find_vma(mm, addr);
16464 + if (check_heap_stack_gap(vma, addr, len))
16465 + /* remember the address as a hint for next time */
16466 + return (mm->free_area_cache = addr);
16467 +
16468 + /* remember the largest hole we saw so far */
16469 + if (addr + mm->cached_hole_size < vma->vm_start)
16470 + mm->cached_hole_size = vma->vm_start - addr;
16471 +
16472 + /* try just below the current vma->vm_start */
16473 + addr = skip_heap_stack_gap(vma, len);
16474 + } while (!IS_ERR_VALUE(addr));
16475 +
16476 +bottomup:
16477 + /*
16478 + * A failed mmap() very likely causes application failure,
16479 + * so fall back to the bottom-up function here. This scenario
16480 + * can happen with large stack limits and large mmap()
16481 + * allocations.
16482 + */
16483 +
16484 +#ifdef CONFIG_PAX_SEGMEXEC
16485 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
16486 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
16487 + else
16488 +#endif
16489 +
16490 + mm->mmap_base = TASK_UNMAPPED_BASE;
16491 +
16492 +#ifdef CONFIG_PAX_RANDMMAP
16493 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16494 + mm->mmap_base += mm->delta_mmap;
16495 +#endif
16496 +
16497 + mm->free_area_cache = mm->mmap_base;
16498 + mm->cached_hole_size = ~0UL;
16499 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16500 + /*
16501 + * Restore the topdown base:
16502 + */
16503 + mm->mmap_base = base;
16504 + mm->free_area_cache = base;
16505 + mm->cached_hole_size = ~0UL;
16506 +
16507 + return addr;
16508 +}
16509
16510 struct sel_arg_struct {
16511 unsigned long n;
16512 @@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
16513 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
16514 case SEMTIMEDOP:
16515 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
16516 - (const struct timespec __user *)fifth);
16517 + (__force const struct timespec __user *)fifth);
16518
16519 case SEMGET:
16520 return sys_semget(first, second, third);
16521 @@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
16522 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
16523 if (ret)
16524 return ret;
16525 - return put_user(raddr, (ulong __user *) third);
16526 + return put_user(raddr, (__force ulong __user *) third);
16527 }
16528 case 1: /* iBCS2 emulator entry point */
16529 if (!segment_eq(get_fs(), get_ds()))
16530 @@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
16531
16532 return error;
16533 }
16534 -
16535 -
16536 -/*
16537 - * Do a system call from kernel instead of calling sys_execve so we
16538 - * end up with proper pt_regs.
16539 - */
16540 -int kernel_execve(const char *filename, char *const argv[], char *const envp[])
16541 -{
16542 - long __res;
16543 - asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
16544 - : "=a" (__res)
16545 - : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
16546 - return __res;
16547 -}
16548 diff -urNp linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c
16549 --- linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
16550 +++ linux-2.6.32.42/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
16551 @@ -32,8 +32,8 @@ out:
16552 return error;
16553 }
16554
16555 -static void find_start_end(unsigned long flags, unsigned long *begin,
16556 - unsigned long *end)
16557 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
16558 + unsigned long *begin, unsigned long *end)
16559 {
16560 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
16561 unsigned long new_begin;
16562 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
16563 *begin = new_begin;
16564 }
16565 } else {
16566 - *begin = TASK_UNMAPPED_BASE;
16567 + *begin = mm->mmap_base;
16568 *end = TASK_SIZE;
16569 }
16570 }
16571 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
16572 if (flags & MAP_FIXED)
16573 return addr;
16574
16575 - find_start_end(flags, &begin, &end);
16576 + find_start_end(mm, flags, &begin, &end);
16577
16578 if (len > end)
16579 return -ENOMEM;
16580
16581 +#ifdef CONFIG_PAX_RANDMMAP
16582 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16583 +#endif
16584 +
16585 if (addr) {
16586 addr = PAGE_ALIGN(addr);
16587 vma = find_vma(mm, addr);
16588 - if (end - len >= addr &&
16589 - (!vma || addr + len <= vma->vm_start))
16590 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
16591 return addr;
16592 }
16593 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
16594 @@ -106,7 +109,7 @@ full_search:
16595 }
16596 return -ENOMEM;
16597 }
16598 - if (!vma || addr + len <= vma->vm_start) {
16599 + if (check_heap_stack_gap(vma, addr, len)) {
16600 /*
16601 * Remember the place where we stopped the search:
16602 */
16603 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
16604 {
16605 struct vm_area_struct *vma;
16606 struct mm_struct *mm = current->mm;
16607 - unsigned long addr = addr0;
16608 + unsigned long base = mm->mmap_base, addr = addr0;
16609
16610 /* requested length too big for entire address space */
16611 if (len > TASK_SIZE)
16612 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
16613 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
16614 goto bottomup;
16615
16616 +#ifdef CONFIG_PAX_RANDMMAP
16617 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16618 +#endif
16619 +
16620 /* requesting a specific address */
16621 if (addr) {
16622 addr = PAGE_ALIGN(addr);
16623 - vma = find_vma(mm, addr);
16624 - if (TASK_SIZE - len >= addr &&
16625 - (!vma || addr + len <= vma->vm_start))
16626 - return addr;
16627 + if (TASK_SIZE - len >= addr) {
16628 + vma = find_vma(mm, addr);
16629 + if (check_heap_stack_gap(vma, addr, len))
16630 + return addr;
16631 + }
16632 }
16633
16634 /* check if free_area_cache is useful for us */
16635 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
16636 /* make sure it can fit in the remaining address space */
16637 if (addr > len) {
16638 vma = find_vma(mm, addr-len);
16639 - if (!vma || addr <= vma->vm_start)
16640 + if (check_heap_stack_gap(vma, addr - len, len))
16641 /* remember the address as a hint for next time */
16642 return mm->free_area_cache = addr-len;
16643 }
16644 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
16645 * return with success:
16646 */
16647 vma = find_vma(mm, addr);
16648 - if (!vma || addr+len <= vma->vm_start)
16649 + if (check_heap_stack_gap(vma, addr, len))
16650 /* remember the address as a hint for next time */
16651 return mm->free_area_cache = addr;
16652
16653 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
16654 mm->cached_hole_size = vma->vm_start - addr;
16655
16656 /* try just below the current vma->vm_start */
16657 - addr = vma->vm_start-len;
16658 - } while (len < vma->vm_start);
16659 + addr = skip_heap_stack_gap(vma, len);
16660 + } while (!IS_ERR_VALUE(addr));
16661
16662 bottomup:
16663 /*
16664 @@ -198,13 +206,21 @@ bottomup:
16665 * can happen with large stack limits and large mmap()
16666 * allocations.
16667 */
16668 + mm->mmap_base = TASK_UNMAPPED_BASE;
16669 +
16670 +#ifdef CONFIG_PAX_RANDMMAP
16671 + if (mm->pax_flags & MF_PAX_RANDMMAP)
16672 + mm->mmap_base += mm->delta_mmap;
16673 +#endif
16674 +
16675 + mm->free_area_cache = mm->mmap_base;
16676 mm->cached_hole_size = ~0UL;
16677 - mm->free_area_cache = TASK_UNMAPPED_BASE;
16678 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16679 /*
16680 * Restore the topdown base:
16681 */
16682 - mm->free_area_cache = mm->mmap_base;
16683 + mm->mmap_base = base;
16684 + mm->free_area_cache = base;
16685 mm->cached_hole_size = ~0UL;
16686
16687 return addr;
16688 diff -urNp linux-2.6.32.42/arch/x86/kernel/tboot.c linux-2.6.32.42/arch/x86/kernel/tboot.c
16689 --- linux-2.6.32.42/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
16690 +++ linux-2.6.32.42/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
16691 @@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
16692
16693 void tboot_shutdown(u32 shutdown_type)
16694 {
16695 - void (*shutdown)(void);
16696 + void (* __noreturn shutdown)(void);
16697
16698 if (!tboot_enabled())
16699 return;
16700 @@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
16701
16702 switch_to_tboot_pt();
16703
16704 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
16705 + shutdown = (void *)tboot->shutdown_entry;
16706 shutdown();
16707
16708 /* should not reach here */
16709 @@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
16710 tboot_shutdown(acpi_shutdown_map[sleep_state]);
16711 }
16712
16713 -static atomic_t ap_wfs_count;
16714 +static atomic_unchecked_t ap_wfs_count;
16715
16716 static int tboot_wait_for_aps(int num_aps)
16717 {
16718 @@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
16719 {
16720 switch (action) {
16721 case CPU_DYING:
16722 - atomic_inc(&ap_wfs_count);
16723 + atomic_inc_unchecked(&ap_wfs_count);
16724 if (num_online_cpus() == 1)
16725 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
16726 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
16727 return NOTIFY_BAD;
16728 break;
16729 }
16730 @@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
16731
16732 tboot_create_trampoline();
16733
16734 - atomic_set(&ap_wfs_count, 0);
16735 + atomic_set_unchecked(&ap_wfs_count, 0);
16736 register_hotcpu_notifier(&tboot_cpu_notifier);
16737 return 0;
16738 }
16739 diff -urNp linux-2.6.32.42/arch/x86/kernel/time.c linux-2.6.32.42/arch/x86/kernel/time.c
16740 --- linux-2.6.32.42/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
16741 +++ linux-2.6.32.42/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
16742 @@ -26,17 +26,13 @@
16743 int timer_ack;
16744 #endif
16745
16746 -#ifdef CONFIG_X86_64
16747 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
16748 -#endif
16749 -
16750 unsigned long profile_pc(struct pt_regs *regs)
16751 {
16752 unsigned long pc = instruction_pointer(regs);
16753
16754 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
16755 + if (!user_mode(regs) && in_lock_functions(pc)) {
16756 #ifdef CONFIG_FRAME_POINTER
16757 - return *(unsigned long *)(regs->bp + sizeof(long));
16758 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
16759 #else
16760 unsigned long *sp =
16761 (unsigned long *)kernel_stack_pointer(regs);
16762 @@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
16763 * or above a saved flags. Eflags has bits 22-31 zero,
16764 * kernel addresses don't.
16765 */
16766 +
16767 +#ifdef CONFIG_PAX_KERNEXEC
16768 + return ktla_ktva(sp[0]);
16769 +#else
16770 if (sp[0] >> 22)
16771 return sp[0];
16772 if (sp[1] >> 22)
16773 return sp[1];
16774 #endif
16775 +
16776 +#endif
16777 }
16778 return pc;
16779 }
16780 diff -urNp linux-2.6.32.42/arch/x86/kernel/tls.c linux-2.6.32.42/arch/x86/kernel/tls.c
16781 --- linux-2.6.32.42/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
16782 +++ linux-2.6.32.42/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
16783 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
16784 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
16785 return -EINVAL;
16786
16787 +#ifdef CONFIG_PAX_SEGMEXEC
16788 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
16789 + return -EINVAL;
16790 +#endif
16791 +
16792 set_tls_desc(p, idx, &info, 1);
16793
16794 return 0;
16795 diff -urNp linux-2.6.32.42/arch/x86/kernel/trampoline_32.S linux-2.6.32.42/arch/x86/kernel/trampoline_32.S
16796 --- linux-2.6.32.42/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
16797 +++ linux-2.6.32.42/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
16798 @@ -32,6 +32,12 @@
16799 #include <asm/segment.h>
16800 #include <asm/page_types.h>
16801
16802 +#ifdef CONFIG_PAX_KERNEXEC
16803 +#define ta(X) (X)
16804 +#else
16805 +#define ta(X) ((X) - __PAGE_OFFSET)
16806 +#endif
16807 +
16808 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
16809 __CPUINITRODATA
16810 .code16
16811 @@ -60,7 +66,7 @@ r_base = .
16812 inc %ax # protected mode (PE) bit
16813 lmsw %ax # into protected mode
16814 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
16815 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
16816 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
16817
16818 # These need to be in the same 64K segment as the above;
16819 # hence we don't use the boot_gdt_descr defined in head.S
16820 diff -urNp linux-2.6.32.42/arch/x86/kernel/trampoline_64.S linux-2.6.32.42/arch/x86/kernel/trampoline_64.S
16821 --- linux-2.6.32.42/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
16822 +++ linux-2.6.32.42/arch/x86/kernel/trampoline_64.S 2011-04-17 15:56:46.000000000 -0400
16823 @@ -91,7 +91,7 @@ startup_32:
16824 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
16825 movl %eax, %ds
16826
16827 - movl $X86_CR4_PAE, %eax
16828 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
16829 movl %eax, %cr4 # Enable PAE mode
16830
16831 # Setup trampoline 4 level pagetables
16832 @@ -138,7 +138,7 @@ tidt:
16833 # so the kernel can live anywhere
16834 .balign 4
16835 tgdt:
16836 - .short tgdt_end - tgdt # gdt limit
16837 + .short tgdt_end - tgdt - 1 # gdt limit
16838 .long tgdt - r_base
16839 .short 0
16840 .quad 0x00cf9b000000ffff # __KERNEL32_CS
16841 diff -urNp linux-2.6.32.42/arch/x86/kernel/traps.c linux-2.6.32.42/arch/x86/kernel/traps.c
16842 --- linux-2.6.32.42/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
16843 +++ linux-2.6.32.42/arch/x86/kernel/traps.c 2011-04-17 15:56:46.000000000 -0400
16844 @@ -69,12 +69,6 @@ asmlinkage int system_call(void);
16845
16846 /* Do we ignore FPU interrupts ? */
16847 char ignore_fpu_irq;
16848 -
16849 -/*
16850 - * The IDT has to be page-aligned to simplify the Pentium
16851 - * F0 0F bug workaround.
16852 - */
16853 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
16854 #endif
16855
16856 DECLARE_BITMAP(used_vectors, NR_VECTORS);
16857 @@ -112,19 +106,19 @@ static inline void preempt_conditional_c
16858 static inline void
16859 die_if_kernel(const char *str, struct pt_regs *regs, long err)
16860 {
16861 - if (!user_mode_vm(regs))
16862 + if (!user_mode(regs))
16863 die(str, regs, err);
16864 }
16865 #endif
16866
16867 static void __kprobes
16868 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
16869 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
16870 long error_code, siginfo_t *info)
16871 {
16872 struct task_struct *tsk = current;
16873
16874 #ifdef CONFIG_X86_32
16875 - if (regs->flags & X86_VM_MASK) {
16876 + if (v8086_mode(regs)) {
16877 /*
16878 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
16879 * On nmi (interrupt 2), do_trap should not be called.
16880 @@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
16881 }
16882 #endif
16883
16884 - if (!user_mode(regs))
16885 + if (!user_mode_novm(regs))
16886 goto kernel_trap;
16887
16888 #ifdef CONFIG_X86_32
16889 @@ -158,7 +152,7 @@ trap_signal:
16890 printk_ratelimit()) {
16891 printk(KERN_INFO
16892 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
16893 - tsk->comm, tsk->pid, str,
16894 + tsk->comm, task_pid_nr(tsk), str,
16895 regs->ip, regs->sp, error_code);
16896 print_vma_addr(" in ", regs->ip);
16897 printk("\n");
16898 @@ -175,8 +169,20 @@ kernel_trap:
16899 if (!fixup_exception(regs)) {
16900 tsk->thread.error_code = error_code;
16901 tsk->thread.trap_no = trapnr;
16902 +
16903 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16904 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
16905 + str = "PAX: suspicious stack segment fault";
16906 +#endif
16907 +
16908 die(str, regs, error_code);
16909 }
16910 +
16911 +#ifdef CONFIG_PAX_REFCOUNT
16912 + if (trapnr == 4)
16913 + pax_report_refcount_overflow(regs);
16914 +#endif
16915 +
16916 return;
16917
16918 #ifdef CONFIG_X86_32
16919 @@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
16920 conditional_sti(regs);
16921
16922 #ifdef CONFIG_X86_32
16923 - if (regs->flags & X86_VM_MASK)
16924 + if (v8086_mode(regs))
16925 goto gp_in_vm86;
16926 #endif
16927
16928 tsk = current;
16929 - if (!user_mode(regs))
16930 + if (!user_mode_novm(regs))
16931 goto gp_in_kernel;
16932
16933 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
16934 + if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
16935 + struct mm_struct *mm = tsk->mm;
16936 + unsigned long limit;
16937 +
16938 + down_write(&mm->mmap_sem);
16939 + limit = mm->context.user_cs_limit;
16940 + if (limit < TASK_SIZE) {
16941 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
16942 + up_write(&mm->mmap_sem);
16943 + return;
16944 + }
16945 + up_write(&mm->mmap_sem);
16946 + }
16947 +#endif
16948 +
16949 tsk->thread.error_code = error_code;
16950 tsk->thread.trap_no = 13;
16951
16952 @@ -305,6 +327,13 @@ gp_in_kernel:
16953 if (notify_die(DIE_GPF, "general protection fault", regs,
16954 error_code, 13, SIGSEGV) == NOTIFY_STOP)
16955 return;
16956 +
16957 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16958 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
16959 + die("PAX: suspicious general protection fault", regs, error_code);
16960 + else
16961 +#endif
16962 +
16963 die("general protection fault", regs, error_code);
16964 }
16965
16966 @@ -558,7 +587,7 @@ dotraplinkage void __kprobes do_debug(st
16967 }
16968
16969 #ifdef CONFIG_X86_32
16970 - if (regs->flags & X86_VM_MASK)
16971 + if (v8086_mode(regs))
16972 goto debug_vm86;
16973 #endif
16974
16975 @@ -570,7 +599,7 @@ dotraplinkage void __kprobes do_debug(st
16976 * kernel space (but re-enable TF when returning to user mode).
16977 */
16978 if (condition & DR_STEP) {
16979 - if (!user_mode(regs))
16980 + if (!user_mode_novm(regs))
16981 goto clear_TF_reenable;
16982 }
16983
16984 @@ -757,7 +786,7 @@ do_simd_coprocessor_error(struct pt_regs
16985 * Handle strange cache flush from user space exception
16986 * in all other cases. This is undocumented behaviour.
16987 */
16988 - if (regs->flags & X86_VM_MASK) {
16989 + if (v8086_mode(regs)) {
16990 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
16991 return;
16992 }
16993 @@ -798,7 +827,7 @@ asmlinkage void __attribute__((weak)) sm
16994 void __math_state_restore(void)
16995 {
16996 struct thread_info *thread = current_thread_info();
16997 - struct task_struct *tsk = thread->task;
16998 + struct task_struct *tsk = current;
16999
17000 /*
17001 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17002 @@ -825,8 +854,7 @@ void __math_state_restore(void)
17003 */
17004 asmlinkage void math_state_restore(void)
17005 {
17006 - struct thread_info *thread = current_thread_info();
17007 - struct task_struct *tsk = thread->task;
17008 + struct task_struct *tsk = current;
17009
17010 if (!tsk_used_math(tsk)) {
17011 local_irq_enable();
17012 diff -urNp linux-2.6.32.42/arch/x86/kernel/vm86_32.c linux-2.6.32.42/arch/x86/kernel/vm86_32.c
17013 --- linux-2.6.32.42/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
17014 +++ linux-2.6.32.42/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
17015 @@ -41,6 +41,7 @@
17016 #include <linux/ptrace.h>
17017 #include <linux/audit.h>
17018 #include <linux/stddef.h>
17019 +#include <linux/grsecurity.h>
17020
17021 #include <asm/uaccess.h>
17022 #include <asm/io.h>
17023 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
17024 do_exit(SIGSEGV);
17025 }
17026
17027 - tss = &per_cpu(init_tss, get_cpu());
17028 + tss = init_tss + get_cpu();
17029 current->thread.sp0 = current->thread.saved_sp0;
17030 current->thread.sysenter_cs = __KERNEL_CS;
17031 load_sp0(tss, &current->thread);
17032 @@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
17033 struct task_struct *tsk;
17034 int tmp, ret = -EPERM;
17035
17036 +#ifdef CONFIG_GRKERNSEC_VM86
17037 + if (!capable(CAP_SYS_RAWIO)) {
17038 + gr_handle_vm86();
17039 + goto out;
17040 + }
17041 +#endif
17042 +
17043 tsk = current;
17044 if (tsk->thread.saved_sp0)
17045 goto out;
17046 @@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
17047 int tmp, ret;
17048 struct vm86plus_struct __user *v86;
17049
17050 +#ifdef CONFIG_GRKERNSEC_VM86
17051 + if (!capable(CAP_SYS_RAWIO)) {
17052 + gr_handle_vm86();
17053 + ret = -EPERM;
17054 + goto out;
17055 + }
17056 +#endif
17057 +
17058 tsk = current;
17059 switch (regs->bx) {
17060 case VM86_REQUEST_IRQ:
17061 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
17062 tsk->thread.saved_fs = info->regs32->fs;
17063 tsk->thread.saved_gs = get_user_gs(info->regs32);
17064
17065 - tss = &per_cpu(init_tss, get_cpu());
17066 + tss = init_tss + get_cpu();
17067 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17068 if (cpu_has_sep)
17069 tsk->thread.sysenter_cs = 0;
17070 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
17071 goto cannot_handle;
17072 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17073 goto cannot_handle;
17074 - intr_ptr = (unsigned long __user *) (i << 2);
17075 + intr_ptr = (__force unsigned long __user *) (i << 2);
17076 if (get_user(segoffs, intr_ptr))
17077 goto cannot_handle;
17078 if ((segoffs >> 16) == BIOSSEG)
17079 diff -urNp linux-2.6.32.42/arch/x86/kernel/vmi_32.c linux-2.6.32.42/arch/x86/kernel/vmi_32.c
17080 --- linux-2.6.32.42/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
17081 +++ linux-2.6.32.42/arch/x86/kernel/vmi_32.c 2011-04-17 15:56:46.000000000 -0400
17082 @@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
17083 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
17084
17085 #define call_vrom_func(rom,func) \
17086 - (((VROMFUNC *)(rom->func))())
17087 + (((VROMFUNC *)(ktva_ktla(rom.func)))())
17088
17089 #define call_vrom_long_func(rom,func,arg) \
17090 - (((VROMLONGFUNC *)(rom->func)) (arg))
17091 +({\
17092 + u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
17093 + struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
17094 + __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
17095 + __reloc;\
17096 +})
17097
17098 -static struct vrom_header *vmi_rom;
17099 +static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
17100 static int disable_pge;
17101 static int disable_pse;
17102 static int disable_sep;
17103 @@ -76,10 +81,10 @@ static struct {
17104 void (*set_initial_ap_state)(int, int);
17105 void (*halt)(void);
17106 void (*set_lazy_mode)(int mode);
17107 -} vmi_ops;
17108 +} vmi_ops __read_only;
17109
17110 /* Cached VMI operations */
17111 -struct vmi_timer_ops vmi_timer_ops;
17112 +struct vmi_timer_ops vmi_timer_ops __read_only;
17113
17114 /*
17115 * VMI patching routines.
17116 @@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
17117 static inline void patch_offset(void *insnbuf,
17118 unsigned long ip, unsigned long dest)
17119 {
17120 - *(unsigned long *)(insnbuf+1) = dest-ip-5;
17121 + *(unsigned long *)(insnbuf+1) = dest-ip-5;
17122 }
17123
17124 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
17125 @@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
17126 {
17127 u64 reloc;
17128 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
17129 +
17130 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
17131 switch(rel->type) {
17132 case VMI_RELOCATION_CALL_REL:
17133 @@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
17134
17135 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
17136 {
17137 - const pte_t pte = { .pte = 0 };
17138 + const pte_t pte = __pte(0ULL);
17139 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
17140 }
17141
17142 static void vmi_pmd_clear(pmd_t *pmd)
17143 {
17144 - const pte_t pte = { .pte = 0 };
17145 + const pte_t pte = __pte(0ULL);
17146 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
17147 }
17148 #endif
17149 @@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
17150 ap.ss = __KERNEL_DS;
17151 ap.esp = (unsigned long) start_esp;
17152
17153 - ap.ds = __USER_DS;
17154 - ap.es = __USER_DS;
17155 + ap.ds = __KERNEL_DS;
17156 + ap.es = __KERNEL_DS;
17157 ap.fs = __KERNEL_PERCPU;
17158 - ap.gs = __KERNEL_STACK_CANARY;
17159 + savesegment(gs, ap.gs);
17160
17161 ap.eflags = 0;
17162
17163 @@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
17164 paravirt_leave_lazy_mmu();
17165 }
17166
17167 +#ifdef CONFIG_PAX_KERNEXEC
17168 +static unsigned long vmi_pax_open_kernel(void)
17169 +{
17170 + return 0;
17171 +}
17172 +
17173 +static unsigned long vmi_pax_close_kernel(void)
17174 +{
17175 + return 0;
17176 +}
17177 +#endif
17178 +
17179 static inline int __init check_vmi_rom(struct vrom_header *rom)
17180 {
17181 struct pci_header *pci;
17182 @@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
17183 return 0;
17184 if (rom->vrom_signature != VMI_SIGNATURE)
17185 return 0;
17186 + if (rom->rom_length * 512 > sizeof(*rom)) {
17187 + printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
17188 + return 0;
17189 + }
17190 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
17191 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
17192 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
17193 @@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
17194 struct vrom_header *romstart;
17195 romstart = (struct vrom_header *)isa_bus_to_virt(base);
17196 if (check_vmi_rom(romstart)) {
17197 - vmi_rom = romstart;
17198 + vmi_rom = *romstart;
17199 return 1;
17200 }
17201 }
17202 @@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
17203
17204 para_fill(pv_irq_ops.safe_halt, Halt);
17205
17206 +#ifdef CONFIG_PAX_KERNEXEC
17207 + pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
17208 + pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
17209 +#endif
17210 +
17211 /*
17212 * Alternative instruction rewriting doesn't happen soon enough
17213 * to convert VMI_IRET to a call instead of a jump; so we have
17214 @@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
17215
17216 void __init vmi_init(void)
17217 {
17218 - if (!vmi_rom)
17219 + if (!vmi_rom.rom_signature)
17220 probe_vmi_rom();
17221 else
17222 - check_vmi_rom(vmi_rom);
17223 + check_vmi_rom(&vmi_rom);
17224
17225 /* In case probing for or validating the ROM failed, basil */
17226 - if (!vmi_rom)
17227 + if (!vmi_rom.rom_signature)
17228 return;
17229
17230 - reserve_top_address(-vmi_rom->virtual_top);
17231 + reserve_top_address(-vmi_rom.virtual_top);
17232
17233 #ifdef CONFIG_X86_IO_APIC
17234 /* This is virtual hardware; timer routing is wired correctly */
17235 @@ -874,7 +901,7 @@ void __init vmi_activate(void)
17236 {
17237 unsigned long flags;
17238
17239 - if (!vmi_rom)
17240 + if (!vmi_rom.rom_signature)
17241 return;
17242
17243 local_irq_save(flags);
17244 diff -urNp linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S
17245 --- linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
17246 +++ linux-2.6.32.42/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
17247 @@ -26,6 +26,13 @@
17248 #include <asm/page_types.h>
17249 #include <asm/cache.h>
17250 #include <asm/boot.h>
17251 +#include <asm/segment.h>
17252 +
17253 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17254 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17255 +#else
17256 +#define __KERNEL_TEXT_OFFSET 0
17257 +#endif
17258
17259 #undef i386 /* in case the preprocessor is a 32bit one */
17260
17261 @@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
17262 #ifdef CONFIG_X86_32
17263 OUTPUT_ARCH(i386)
17264 ENTRY(phys_startup_32)
17265 -jiffies = jiffies_64;
17266 #else
17267 OUTPUT_ARCH(i386:x86-64)
17268 ENTRY(phys_startup_64)
17269 -jiffies_64 = jiffies;
17270 #endif
17271
17272 PHDRS {
17273 text PT_LOAD FLAGS(5); /* R_E */
17274 - data PT_LOAD FLAGS(7); /* RWE */
17275 +#ifdef CONFIG_X86_32
17276 + module PT_LOAD FLAGS(5); /* R_E */
17277 +#endif
17278 +#ifdef CONFIG_XEN
17279 + rodata PT_LOAD FLAGS(5); /* R_E */
17280 +#else
17281 + rodata PT_LOAD FLAGS(4); /* R__ */
17282 +#endif
17283 + data PT_LOAD FLAGS(6); /* RW_ */
17284 #ifdef CONFIG_X86_64
17285 user PT_LOAD FLAGS(5); /* R_E */
17286 +#endif
17287 + init.begin PT_LOAD FLAGS(6); /* RW_ */
17288 #ifdef CONFIG_SMP
17289 percpu PT_LOAD FLAGS(6); /* RW_ */
17290 #endif
17291 + text.init PT_LOAD FLAGS(5); /* R_E */
17292 + text.exit PT_LOAD FLAGS(5); /* R_E */
17293 init PT_LOAD FLAGS(7); /* RWE */
17294 -#endif
17295 note PT_NOTE FLAGS(0); /* ___ */
17296 }
17297
17298 SECTIONS
17299 {
17300 #ifdef CONFIG_X86_32
17301 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17302 - phys_startup_32 = startup_32 - LOAD_OFFSET;
17303 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17304 #else
17305 - . = __START_KERNEL;
17306 - phys_startup_64 = startup_64 - LOAD_OFFSET;
17307 + . = __START_KERNEL;
17308 #endif
17309
17310 /* Text and read-only data */
17311 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
17312 - _text = .;
17313 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17314 /* bootstrapping code */
17315 +#ifdef CONFIG_X86_32
17316 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17317 +#else
17318 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17319 +#endif
17320 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17321 + _text = .;
17322 HEAD_TEXT
17323 #ifdef CONFIG_X86_32
17324 . = ALIGN(PAGE_SIZE);
17325 @@ -82,28 +102,71 @@ SECTIONS
17326 IRQENTRY_TEXT
17327 *(.fixup)
17328 *(.gnu.warning)
17329 - /* End of text section */
17330 - _etext = .;
17331 } :text = 0x9090
17332
17333 - NOTES :text :note
17334 + . += __KERNEL_TEXT_OFFSET;
17335 +
17336 +#ifdef CONFIG_X86_32
17337 + . = ALIGN(PAGE_SIZE);
17338 + .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
17339 + *(.vmi.rom)
17340 + } :module
17341 +
17342 + . = ALIGN(PAGE_SIZE);
17343 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17344 +
17345 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17346 + MODULES_EXEC_VADDR = .;
17347 + BYTE(0)
17348 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17349 + . = ALIGN(HPAGE_SIZE);
17350 + MODULES_EXEC_END = . - 1;
17351 +#endif
17352 +
17353 + } :module
17354 +#endif
17355
17356 - EXCEPTION_TABLE(16) :text = 0x9090
17357 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17358 + /* End of text section */
17359 + _etext = . - __KERNEL_TEXT_OFFSET;
17360 + }
17361 +
17362 +#ifdef CONFIG_X86_32
17363 + . = ALIGN(PAGE_SIZE);
17364 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17365 + *(.idt)
17366 + . = ALIGN(PAGE_SIZE);
17367 + *(.empty_zero_page)
17368 + *(.swapper_pg_fixmap)
17369 + *(.swapper_pg_pmd)
17370 + *(.swapper_pg_dir)
17371 + *(.trampoline_pg_dir)
17372 + } :rodata
17373 +#endif
17374 +
17375 + . = ALIGN(PAGE_SIZE);
17376 + NOTES :rodata :note
17377 +
17378 + EXCEPTION_TABLE(16) :rodata
17379
17380 RO_DATA(PAGE_SIZE)
17381
17382 /* Data */
17383 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17384 +
17385 +#ifdef CONFIG_PAX_KERNEXEC
17386 + . = ALIGN(HPAGE_SIZE);
17387 +#else
17388 + . = ALIGN(PAGE_SIZE);
17389 +#endif
17390 +
17391 /* Start of data section */
17392 _sdata = .;
17393
17394 /* init_task */
17395 INIT_TASK_DATA(THREAD_SIZE)
17396
17397 -#ifdef CONFIG_X86_32
17398 - /* 32 bit has nosave before _edata */
17399 NOSAVE_DATA
17400 -#endif
17401
17402 PAGE_ALIGNED_DATA(PAGE_SIZE)
17403
17404 @@ -112,6 +175,8 @@ SECTIONS
17405 DATA_DATA
17406 CONSTRUCTORS
17407
17408 + jiffies = jiffies_64;
17409 +
17410 /* rarely changed data like cpu maps */
17411 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
17412
17413 @@ -166,12 +231,6 @@ SECTIONS
17414 }
17415 vgetcpu_mode = VVIRT(.vgetcpu_mode);
17416
17417 - . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
17418 - .jiffies : AT(VLOAD(.jiffies)) {
17419 - *(.jiffies)
17420 - }
17421 - jiffies = VVIRT(.jiffies);
17422 -
17423 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
17424 *(.vsyscall_3)
17425 }
17426 @@ -187,12 +246,19 @@ SECTIONS
17427 #endif /* CONFIG_X86_64 */
17428
17429 /* Init code and data - will be freed after init */
17430 - . = ALIGN(PAGE_SIZE);
17431 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
17432 + BYTE(0)
17433 +
17434 +#ifdef CONFIG_PAX_KERNEXEC
17435 + . = ALIGN(HPAGE_SIZE);
17436 +#else
17437 + . = ALIGN(PAGE_SIZE);
17438 +#endif
17439 +
17440 __init_begin = .; /* paired with __init_end */
17441 - }
17442 + } :init.begin
17443
17444 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
17445 +#ifdef CONFIG_SMP
17446 /*
17447 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
17448 * output PHDR, so the next output section - .init.text - should
17449 @@ -201,12 +267,27 @@ SECTIONS
17450 PERCPU_VADDR(0, :percpu)
17451 #endif
17452
17453 - INIT_TEXT_SECTION(PAGE_SIZE)
17454 -#ifdef CONFIG_X86_64
17455 - :init
17456 -#endif
17457 + . = ALIGN(PAGE_SIZE);
17458 + init_begin = .;
17459 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
17460 + VMLINUX_SYMBOL(_sinittext) = .;
17461 + INIT_TEXT
17462 + VMLINUX_SYMBOL(_einittext) = .;
17463 + . = ALIGN(PAGE_SIZE);
17464 + } :text.init
17465
17466 - INIT_DATA_SECTION(16)
17467 + /*
17468 + * .exit.text is discarded at runtime, not link time, to deal with
17469 + * references from .altinstructions and .eh_frame
17470 + */
17471 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17472 + EXIT_TEXT
17473 + . = ALIGN(16);
17474 + } :text.exit
17475 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
17476 +
17477 + . = ALIGN(PAGE_SIZE);
17478 + INIT_DATA_SECTION(16) :init
17479
17480 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
17481 __x86_cpu_dev_start = .;
17482 @@ -232,19 +313,11 @@ SECTIONS
17483 *(.altinstr_replacement)
17484 }
17485
17486 - /*
17487 - * .exit.text is discard at runtime, not link time, to deal with
17488 - * references from .altinstructions and .eh_frame
17489 - */
17490 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
17491 - EXIT_TEXT
17492 - }
17493 -
17494 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
17495 EXIT_DATA
17496 }
17497
17498 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
17499 +#ifndef CONFIG_SMP
17500 PERCPU(PAGE_SIZE)
17501 #endif
17502
17503 @@ -267,12 +340,6 @@ SECTIONS
17504 . = ALIGN(PAGE_SIZE);
17505 }
17506
17507 -#ifdef CONFIG_X86_64
17508 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
17509 - NOSAVE_DATA
17510 - }
17511 -#endif
17512 -
17513 /* BSS */
17514 . = ALIGN(PAGE_SIZE);
17515 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
17516 @@ -288,6 +355,7 @@ SECTIONS
17517 __brk_base = .;
17518 . += 64 * 1024; /* 64k alignment slop space */
17519 *(.brk_reservation) /* areas brk users have reserved */
17520 + . = ALIGN(HPAGE_SIZE);
17521 __brk_limit = .;
17522 }
17523
17524 @@ -316,13 +384,12 @@ SECTIONS
17525 * for the boot processor.
17526 */
17527 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
17528 -INIT_PER_CPU(gdt_page);
17529 INIT_PER_CPU(irq_stack_union);
17530
17531 /*
17532 * Build-time check on the image size:
17533 */
17534 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
17535 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
17536 "kernel image bigger than KERNEL_IMAGE_SIZE");
17537
17538 #ifdef CONFIG_SMP
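With CONFIG_PAX_KERNEXEC on 32-bit, the script above links the text sections __KERNEL_TEXT_OFFSET (= LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR) below the address they are mapped at, which is why other hunks in this patch translate text addresses with ktla_ktva()/ktva_ktla() (see e.g. the vmx.c hunk further down). A rough sketch of that arithmetic, assuming the 32-bit KERNEXEC case; the exact macro bodies in the patch may differ:

#define __KERNEL_TEXT_OFFSET	(PAGE_OFFSET + LOAD_PHYSICAL_ADDR)
#define ktla_ktva(addr)	((addr) + __KERNEL_TEXT_OFFSET)	/* link-time text address -> runtime address */
#define ktva_ktla(addr)	((addr) - __KERNEL_TEXT_OFFSET)	/* runtime address -> link-time text address */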
17539 diff -urNp linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c
17540 --- linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
17541 +++ linux-2.6.32.42/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
17542 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
17543
17544 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
17545 /* copy vsyscall data */
17546 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
17547 vsyscall_gtod_data.clock.vread = clock->vread;
17548 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
17549 vsyscall_gtod_data.clock.mask = clock->mask;
17550 @@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
17551 We do this here because otherwise user space would do it on
17552 its own in a likely inferior way (no access to jiffies).
17553 If you don't like it pass NULL. */
17554 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
17555 + if (tcache && tcache->blob[0] == (j = jiffies)) {
17556 p = tcache->blob[1];
17557 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
17558 /* Load per CPU data from RDTSCP */
17559 diff -urNp linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c
17560 --- linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
17561 +++ linux-2.6.32.42/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
17562 @@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
17563
17564 EXPORT_SYMBOL(copy_user_generic);
17565 EXPORT_SYMBOL(__copy_user_nocache);
17566 -EXPORT_SYMBOL(copy_from_user);
17567 -EXPORT_SYMBOL(copy_to_user);
17568 EXPORT_SYMBOL(__copy_from_user_inatomic);
17569
17570 EXPORT_SYMBOL(copy_page);
17571 diff -urNp linux-2.6.32.42/arch/x86/kernel/xsave.c linux-2.6.32.42/arch/x86/kernel/xsave.c
17572 --- linux-2.6.32.42/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
17573 +++ linux-2.6.32.42/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
17574 @@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
17575 fx_sw_user->xstate_size > fx_sw_user->extended_size)
17576 return -1;
17577
17578 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
17579 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
17580 fx_sw_user->extended_size -
17581 FP_XSTATE_MAGIC2_SIZE));
17582 /*
17583 @@ -196,7 +196,7 @@ fx_only:
17584 * the other extended state.
17585 */
17586 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
17587 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
17588 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
17589 }
17590
17591 /*
17592 @@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
17593 if (task_thread_info(tsk)->status & TS_XSAVE)
17594 err = restore_user_xstate(buf);
17595 else
17596 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
17597 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
17598 buf);
17599 if (unlikely(err)) {
17600 /*
17601 diff -urNp linux-2.6.32.42/arch/x86/kvm/emulate.c linux-2.6.32.42/arch/x86/kvm/emulate.c
17602 --- linux-2.6.32.42/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
17603 +++ linux-2.6.32.42/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
17604 @@ -81,8 +81,8 @@
17605 #define Src2CL (1<<29)
17606 #define Src2ImmByte (2<<29)
17607 #define Src2One (3<<29)
17608 -#define Src2Imm16 (4<<29)
17609 -#define Src2Mask (7<<29)
17610 +#define Src2Imm16 (4U<<29)
17611 +#define Src2Mask (7U<<29)
17612
17613 enum {
17614 Group1_80, Group1_81, Group1_82, Group1_83,
17615 @@ -411,6 +411,7 @@ static u32 group2_table[] = {
17616
17617 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
17618 do { \
17619 + unsigned long _tmp; \
17620 __asm__ __volatile__ ( \
17621 _PRE_EFLAGS("0", "4", "2") \
17622 _op _suffix " %"_x"3,%1; " \
17623 @@ -424,8 +425,6 @@ static u32 group2_table[] = {
17624 /* Raw emulation: instruction has two explicit operands. */
17625 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
17626 do { \
17627 - unsigned long _tmp; \
17628 - \
17629 switch ((_dst).bytes) { \
17630 case 2: \
17631 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
17632 @@ -441,7 +440,6 @@ static u32 group2_table[] = {
17633
17634 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
17635 do { \
17636 - unsigned long _tmp; \
17637 switch ((_dst).bytes) { \
17638 case 1: \
17639 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
17640 diff -urNp linux-2.6.32.42/arch/x86/kvm/lapic.c linux-2.6.32.42/arch/x86/kvm/lapic.c
17641 --- linux-2.6.32.42/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
17642 +++ linux-2.6.32.42/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
17643 @@ -52,7 +52,7 @@
17644 #define APIC_BUS_CYCLE_NS 1
17645
17646 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
17647 -#define apic_debug(fmt, arg...)
17648 +#define apic_debug(fmt, arg...) do {} while (0)
17649
17650 #define APIC_LVT_NUM 6
17651 /* 14 is the version for Xeon and Pentium 8.4.8*/
17652 diff -urNp linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h
17653 --- linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
17654 +++ linux-2.6.32.42/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
17655 @@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
17656 int level = PT_PAGE_TABLE_LEVEL;
17657 unsigned long mmu_seq;
17658
17659 + pax_track_stack();
17660 +
17661 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
17662 kvm_mmu_audit(vcpu, "pre page fault");
17663
17664 diff -urNp linux-2.6.32.42/arch/x86/kvm/svm.c linux-2.6.32.42/arch/x86/kvm/svm.c
17665 --- linux-2.6.32.42/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
17666 +++ linux-2.6.32.42/arch/x86/kvm/svm.c 2011-04-17 15:56:46.000000000 -0400
17667 @@ -2483,9 +2483,12 @@ static int handle_exit(struct kvm_run *k
17668 static void reload_tss(struct kvm_vcpu *vcpu)
17669 {
17670 int cpu = raw_smp_processor_id();
17671 -
17672 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
17673 +
17674 + pax_open_kernel();
17675 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
17676 + pax_close_kernel();
17677 +
17678 load_TR_desc();
17679 }
17680
17681 @@ -2946,7 +2949,7 @@ static bool svm_gb_page_enable(void)
17682 return true;
17683 }
17684
17685 -static struct kvm_x86_ops svm_x86_ops = {
17686 +static const struct kvm_x86_ops svm_x86_ops = {
17687 .cpu_has_kvm_support = has_svm,
17688 .disabled_by_bios = is_disabled,
17689 .hardware_setup = svm_hardware_setup,
17690 diff -urNp linux-2.6.32.42/arch/x86/kvm/vmx.c linux-2.6.32.42/arch/x86/kvm/vmx.c
17691 --- linux-2.6.32.42/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
17692 +++ linux-2.6.32.42/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
17693 @@ -570,7 +570,11 @@ static void reload_tss(void)
17694
17695 kvm_get_gdt(&gdt);
17696 descs = (void *)gdt.base;
17697 +
17698 + pax_open_kernel();
17699 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
17700 + pax_close_kernel();
17701 +
17702 load_TR_desc();
17703 }
17704
17705 @@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
17706 if (!cpu_has_vmx_flexpriority())
17707 flexpriority_enabled = 0;
17708
17709 - if (!cpu_has_vmx_tpr_shadow())
17710 - kvm_x86_ops->update_cr8_intercept = NULL;
17711 + if (!cpu_has_vmx_tpr_shadow()) {
17712 + pax_open_kernel();
17713 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
17714 + pax_close_kernel();
17715 + }
17716
17717 if (enable_ept && !cpu_has_vmx_ept_2m_page())
17718 kvm_disable_largepages();
17719 @@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
17720 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
17721
17722 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
17723 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
17724 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
17725 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
17726 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
17727 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
17728 @@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
17729 "jmp .Lkvm_vmx_return \n\t"
17730 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
17731 ".Lkvm_vmx_return: "
17732 +
17733 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17734 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
17735 + ".Lkvm_vmx_return2: "
17736 +#endif
17737 +
17738 /* Save guest registers, load host registers, keep flags */
17739 "xchg %0, (%%"R"sp) \n\t"
17740 "mov %%"R"ax, %c[rax](%0) \n\t"
17741 @@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
17742 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
17743 #endif
17744 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
17745 +
17746 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17747 + ,[cs]"i"(__KERNEL_CS)
17748 +#endif
17749 +
17750 : "cc", "memory"
17751 - , R"bx", R"di", R"si"
17752 + , R"ax", R"bx", R"di", R"si"
17753 #ifdef CONFIG_X86_64
17754 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
17755 #endif
17756 @@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
17757 if (vmx->rmode.irq.pending)
17758 fixup_rmode_irq(vmx);
17759
17760 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
17761 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
17762 +
17763 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17764 + loadsegment(fs, __KERNEL_PERCPU);
17765 +#endif
17766 +
17767 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17768 + __set_fs(current_thread_info()->addr_limit);
17769 +#endif
17770 +
17771 vmx->launched = 1;
17772
17773 vmx_complete_interrupts(vmx);
17774 @@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
17775 return false;
17776 }
17777
17778 -static struct kvm_x86_ops vmx_x86_ops = {
17779 +static const struct kvm_x86_ops vmx_x86_ops = {
17780 .cpu_has_kvm_support = cpu_has_kvm_support,
17781 .disabled_by_bios = vmx_disabled_by_bios,
17782 .hardware_setup = hardware_setup,
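Both svm_x86_ops and vmx_x86_ops become const above, so the one place that clears a callback at runtime has to go through the KERNEXEC open/close window; the hardware_setup() hunk already shows the pattern, restated here as a tiny helper (the helper itself is illustrative):

static void vmx_disable_cr8_intercept(void)
{
	pax_open_kernel();
	*(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
	pax_close_kernel();
}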
17783 diff -urNp linux-2.6.32.42/arch/x86/kvm/x86.c linux-2.6.32.42/arch/x86/kvm/x86.c
17784 --- linux-2.6.32.42/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
17785 +++ linux-2.6.32.42/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
17786 @@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
17787 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
17788 struct kvm_cpuid_entry2 __user *entries);
17789
17790 -struct kvm_x86_ops *kvm_x86_ops;
17791 +const struct kvm_x86_ops *kvm_x86_ops;
17792 EXPORT_SYMBOL_GPL(kvm_x86_ops);
17793
17794 int ignore_msrs = 0;
17795 @@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
17796 struct kvm_cpuid2 *cpuid,
17797 struct kvm_cpuid_entry2 __user *entries)
17798 {
17799 - int r;
17800 + int r, i;
17801
17802 r = -E2BIG;
17803 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
17804 goto out;
17805 r = -EFAULT;
17806 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
17807 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17808 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
17809 goto out;
17810 + for (i = 0; i < cpuid->nent; ++i) {
17811 + struct kvm_cpuid_entry2 cpuid_entry;
17812 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
17813 + goto out;
17814 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
17815 + }
17816 vcpu->arch.cpuid_nent = cpuid->nent;
17817 kvm_apic_set_version(vcpu);
17818 return 0;
17819 @@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
17820 struct kvm_cpuid2 *cpuid,
17821 struct kvm_cpuid_entry2 __user *entries)
17822 {
17823 - int r;
17824 + int r, i;
17825
17826 vcpu_load(vcpu);
17827 r = -E2BIG;
17828 if (cpuid->nent < vcpu->arch.cpuid_nent)
17829 goto out;
17830 r = -EFAULT;
17831 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
17832 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17833 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
17834 goto out;
17835 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
17836 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
17837 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
17838 + goto out;
17839 + }
17840 return 0;
17841
17842 out:
17843 @@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
17844 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
17845 struct kvm_interrupt *irq)
17846 {
17847 - if (irq->irq < 0 || irq->irq >= 256)
17848 + if (irq->irq >= 256)
17849 return -EINVAL;
17850 if (irqchip_in_kernel(vcpu->kvm))
17851 return -ENXIO;
17852 @@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
17853 .notifier_call = kvmclock_cpufreq_notifier
17854 };
17855
17856 -int kvm_arch_init(void *opaque)
17857 +int kvm_arch_init(const void *opaque)
17858 {
17859 int r, cpu;
17860 - struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
17861 + const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
17862
17863 if (kvm_x86_ops) {
17864 printk(KERN_ERR "kvm: already loaded the other module\n");
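The cpuid ioctl hunks above replace one large copy_{from,to}_user() of the whole entry array with an access_ok() check followed by per-entry __copy_{from,to}_user() through a stack-local entry. The same pattern as a standalone helper (illustrative, not part of the patch):

static int copy_cpuid_entries_from_user(struct kvm_cpuid_entry2 *dst,
					const struct kvm_cpuid_entry2 __user *src,
					unsigned int nent)
{
	unsigned int i;

	if (!access_ok(VERIFY_READ, src, nent * sizeof(*src)))
		return -EFAULT;
	for (i = 0; i < nent; i++) {
		struct kvm_cpuid_entry2 entry;

		if (__copy_from_user(&entry, src + i, sizeof(entry)))
			return -EFAULT;
		dst[i] = entry;	/* commit only fully-read entries */
	}
	return 0;
}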
17865 diff -urNp linux-2.6.32.42/arch/x86/lib/atomic64_32.c linux-2.6.32.42/arch/x86/lib/atomic64_32.c
17866 --- linux-2.6.32.42/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
17867 +++ linux-2.6.32.42/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
17868 @@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
17869 }
17870 EXPORT_SYMBOL(atomic64_cmpxchg);
17871
17872 +u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
17873 +{
17874 + return cmpxchg8b(&ptr->counter, old_val, new_val);
17875 +}
17876 +EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
17877 +
17878 /**
17879 * atomic64_xchg - xchg atomic64 variable
17880 * @ptr: pointer to type atomic64_t
17881 @@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
17882 EXPORT_SYMBOL(atomic64_xchg);
17883
17884 /**
17885 + * atomic64_xchg_unchecked - xchg atomic64 variable
17886 + * @ptr: pointer to type atomic64_unchecked_t
17887 + * @new_val: value to assign
17888 + *
17889 + * Atomically xchgs the value of @ptr to @new_val and returns
17890 + * the old value.
17891 + */
17892 +u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
17893 +{
17894 + /*
17895 + * Try first with a (possibly incorrect) assumption about
17896 + * what we have there. We'll do two loops most likely,
17897 + * but we'll get an ownership MESI transaction straight away
17898 + * instead of a read transaction followed by a
17899 + * flush-for-ownership transaction:
17900 + */
17901 + u64 old_val, real_val = 0;
17902 +
17903 + do {
17904 + old_val = real_val;
17905 +
17906 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
17907 +
17908 + } while (real_val != old_val);
17909 +
17910 + return old_val;
17911 +}
17912 +EXPORT_SYMBOL(atomic64_xchg_unchecked);
17913 +
17914 +/**
17915 * atomic64_set - set atomic64 variable
17916 * @ptr: pointer to type atomic64_t
17917 * @new_val: value to assign
17918 @@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
17919 EXPORT_SYMBOL(atomic64_set);
17920
17921 /**
17922 -EXPORT_SYMBOL(atomic64_read);
17923 + * atomic64_unchecked_set - set atomic64 variable
17924 + * @ptr: pointer to type atomic64_unchecked_t
17925 + * @new_val: value to assign
17926 + *
17927 + * Atomically sets the value of @ptr to @new_val.
17928 + */
17929 +void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
17930 +{
17931 + atomic64_xchg_unchecked(ptr, new_val);
17932 +}
17933 +EXPORT_SYMBOL(atomic64_set_unchecked);
17934 +
17935 +/**
17936 * atomic64_add_return - add and return
17937 * @delta: integer value to add
17938 * @ptr: pointer to type atomic64_t
17939 @@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
17940 }
17941 EXPORT_SYMBOL(atomic64_add_return);
17942
17943 +/**
17944 + * atomic64_add_return_unchecked - add and return
17945 + * @delta: integer value to add
17946 + * @ptr: pointer to type atomic64_unchecked_t
17947 + *
17948 + * Atomically adds @delta to @ptr and returns @delta + *@ptr
17949 + */
17950 +noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17951 +{
17952 + /*
17953 + * Try first with a (possibly incorrect) assumption about
17954 + * what we have there. We'll do two loops most likely,
17955 + * but we'll get an ownership MESI transaction straight away
17956 + * instead of a read transaction followed by a
17957 + * flush-for-ownership transaction:
17958 + */
17959 + u64 old_val, new_val, real_val = 0;
17960 +
17961 + do {
17962 + old_val = real_val;
17963 + new_val = old_val + delta;
17964 +
17965 + real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
17966 +
17967 + } while (real_val != old_val);
17968 +
17969 + return new_val;
17970 +}
17971 +EXPORT_SYMBOL(atomic64_add_return_unchecked);
17972 +
17973 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
17974 {
17975 return atomic64_add_return(-delta, ptr);
17976 }
17977 EXPORT_SYMBOL(atomic64_sub_return);
17978
17979 +u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
17980 +{
17981 + return atomic64_add_return_unchecked(-delta, ptr);
17982 +}
17983 +EXPORT_SYMBOL(atomic64_sub_return_unchecked);
17984 +
17985 u64 atomic64_inc_return(atomic64_t *ptr)
17986 {
17987 return atomic64_add_return(1, ptr);
17988 }
17989 EXPORT_SYMBOL(atomic64_inc_return);
17990
17991 +u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
17992 +{
17993 + return atomic64_add_return_unchecked(1, ptr);
17994 +}
17995 +EXPORT_SYMBOL(atomic64_inc_return_unchecked);
17996 +
17997 u64 atomic64_dec_return(atomic64_t *ptr)
17998 {
17999 return atomic64_sub_return(1, ptr);
18000 }
18001 EXPORT_SYMBOL(atomic64_dec_return);
18002
18003 +u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
18004 +{
18005 + return atomic64_sub_return_unchecked(1, ptr);
18006 +}
18007 +EXPORT_SYMBOL(atomic64_dec_return_unchecked);
18008 +
18009 /**
18010 * atomic64_add - add integer to atomic64 variable
18011 * @delta: integer value to add
18012 @@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
18013 EXPORT_SYMBOL(atomic64_add);
18014
18015 /**
18016 + * atomic64_add_unchecked - add integer to atomic64 variable
18017 + * @delta: integer value to add
18018 + * @ptr: pointer to type atomic64_unchecked_t
18019 + *
18020 + * Atomically adds @delta to @ptr.
18021 + */
18022 +void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18023 +{
18024 + atomic64_add_return_unchecked(delta, ptr);
18025 +}
18026 +EXPORT_SYMBOL(atomic64_add_unchecked);
18027 +
18028 +/**
18029 * atomic64_sub - subtract the atomic64 variable
18030 * @delta: integer value to subtract
18031 * @ptr: pointer to type atomic64_t
18032 @@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
18033 EXPORT_SYMBOL(atomic64_sub);
18034
18035 /**
18036 + * atomic64_sub_unchecked - subtract the atomic64 variable
18037 + * @delta: integer value to subtract
18038 + * @ptr: pointer to type atomic64_unchecked_t
18039 + *
18040 + * Atomically subtracts @delta from @ptr.
18041 + */
18042 +void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18043 +{
18044 + atomic64_add_unchecked(-delta, ptr);
18045 +}
18046 +EXPORT_SYMBOL(atomic64_sub_unchecked);
18047 +
18048 +/**
18049 * atomic64_sub_and_test - subtract value from variable and test result
18050 * @delta: integer value to subtract
18051 * @ptr: pointer to type atomic64_t
18052 @@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
18053 EXPORT_SYMBOL(atomic64_inc);
18054
18055 /**
18056 + * atomic64_inc_unchecked - increment atomic64 variable
18057 + * @ptr: pointer to type atomic64_unchecked_t
18058 + *
18059 + * Atomically increments @ptr by 1.
18060 + */
18061 +void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
18062 +{
18063 + atomic64_add_unchecked(1, ptr);
18064 +}
18065 +EXPORT_SYMBOL(atomic64_inc_unchecked);
18066 +
18067 +/**
18068 * atomic64_dec - decrement atomic64 variable
18069 * @ptr: pointer to type atomic64_t
18070 *
18071 @@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
18072 EXPORT_SYMBOL(atomic64_dec);
18073
18074 /**
18075 + * atomic64_dec_unchecked - decrement atomic64 variable
18076 + * @ptr: pointer to type atomic64_unchecked_t
18077 + *
18078 + * Atomically decrements @ptr by 1.
18079 + */
18080 +void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
18081 +{
18082 + atomic64_sub_unchecked(1, ptr);
18083 +}
18084 +EXPORT_SYMBOL(atomic64_dec_unchecked);
18085 +
18086 +/**
18087 * atomic64_dec_and_test - decrement and test
18088 * @ptr: pointer to type atomic64_t
18089 *
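The *_unchecked variants added above exist so that, under the patch's refcount-overflow protection (PAX_REFCOUNT), counters that are allowed to wrap (statistics and similar) opt out of overflow detection while real reference counts keep it. A usage sketch; the struct and field below are illustrative:

struct rx_stats {
	atomic64_unchecked_t bytes;	/* may legitimately wrap */
};

static void account_rx(struct rx_stats *stats, u64 len)
{
	atomic64_add_unchecked(len, &stats->bytes);
}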
18090 diff -urNp linux-2.6.32.42/arch/x86/lib/checksum_32.S linux-2.6.32.42/arch/x86/lib/checksum_32.S
18091 --- linux-2.6.32.42/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
18092 +++ linux-2.6.32.42/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
18093 @@ -28,7 +28,8 @@
18094 #include <linux/linkage.h>
18095 #include <asm/dwarf2.h>
18096 #include <asm/errno.h>
18097 -
18098 +#include <asm/segment.h>
18099 +
18100 /*
18101 * computes a partial checksum, e.g. for TCP/UDP fragments
18102 */
18103 @@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
18104
18105 #define ARGBASE 16
18106 #define FP 12
18107 -
18108 -ENTRY(csum_partial_copy_generic)
18109 +
18110 +ENTRY(csum_partial_copy_generic_to_user)
18111 CFI_STARTPROC
18112 +
18113 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18114 + pushl %gs
18115 + CFI_ADJUST_CFA_OFFSET 4
18116 + popl %es
18117 + CFI_ADJUST_CFA_OFFSET -4
18118 + jmp csum_partial_copy_generic
18119 +#endif
18120 +
18121 +ENTRY(csum_partial_copy_generic_from_user)
18122 +
18123 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18124 + pushl %gs
18125 + CFI_ADJUST_CFA_OFFSET 4
18126 + popl %ds
18127 + CFI_ADJUST_CFA_OFFSET -4
18128 +#endif
18129 +
18130 +ENTRY(csum_partial_copy_generic)
18131 subl $4,%esp
18132 CFI_ADJUST_CFA_OFFSET 4
18133 pushl %edi
18134 @@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
18135 jmp 4f
18136 SRC(1: movw (%esi), %bx )
18137 addl $2, %esi
18138 -DST( movw %bx, (%edi) )
18139 +DST( movw %bx, %es:(%edi) )
18140 addl $2, %edi
18141 addw %bx, %ax
18142 adcl $0, %eax
18143 @@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
18144 SRC(1: movl (%esi), %ebx )
18145 SRC( movl 4(%esi), %edx )
18146 adcl %ebx, %eax
18147 -DST( movl %ebx, (%edi) )
18148 +DST( movl %ebx, %es:(%edi) )
18149 adcl %edx, %eax
18150 -DST( movl %edx, 4(%edi) )
18151 +DST( movl %edx, %es:4(%edi) )
18152
18153 SRC( movl 8(%esi), %ebx )
18154 SRC( movl 12(%esi), %edx )
18155 adcl %ebx, %eax
18156 -DST( movl %ebx, 8(%edi) )
18157 +DST( movl %ebx, %es:8(%edi) )
18158 adcl %edx, %eax
18159 -DST( movl %edx, 12(%edi) )
18160 +DST( movl %edx, %es:12(%edi) )
18161
18162 SRC( movl 16(%esi), %ebx )
18163 SRC( movl 20(%esi), %edx )
18164 adcl %ebx, %eax
18165 -DST( movl %ebx, 16(%edi) )
18166 +DST( movl %ebx, %es:16(%edi) )
18167 adcl %edx, %eax
18168 -DST( movl %edx, 20(%edi) )
18169 +DST( movl %edx, %es:20(%edi) )
18170
18171 SRC( movl 24(%esi), %ebx )
18172 SRC( movl 28(%esi), %edx )
18173 adcl %ebx, %eax
18174 -DST( movl %ebx, 24(%edi) )
18175 +DST( movl %ebx, %es:24(%edi) )
18176 adcl %edx, %eax
18177 -DST( movl %edx, 28(%edi) )
18178 +DST( movl %edx, %es:28(%edi) )
18179
18180 lea 32(%esi), %esi
18181 lea 32(%edi), %edi
18182 @@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
18183 shrl $2, %edx # This clears CF
18184 SRC(3: movl (%esi), %ebx )
18185 adcl %ebx, %eax
18186 -DST( movl %ebx, (%edi) )
18187 +DST( movl %ebx, %es:(%edi) )
18188 lea 4(%esi), %esi
18189 lea 4(%edi), %edi
18190 dec %edx
18191 @@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
18192 jb 5f
18193 SRC( movw (%esi), %cx )
18194 leal 2(%esi), %esi
18195 -DST( movw %cx, (%edi) )
18196 +DST( movw %cx, %es:(%edi) )
18197 leal 2(%edi), %edi
18198 je 6f
18199 shll $16,%ecx
18200 SRC(5: movb (%esi), %cl )
18201 -DST( movb %cl, (%edi) )
18202 +DST( movb %cl, %es:(%edi) )
18203 6: addl %ecx, %eax
18204 adcl $0, %eax
18205 7:
18206 @@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
18207
18208 6001:
18209 movl ARGBASE+20(%esp), %ebx # src_err_ptr
18210 - movl $-EFAULT, (%ebx)
18211 + movl $-EFAULT, %ss:(%ebx)
18212
18213 # zero the complete destination - computing the rest
18214 # is too much work
18215 @@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
18216
18217 6002:
18218 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18219 - movl $-EFAULT,(%ebx)
18220 + movl $-EFAULT,%ss:(%ebx)
18221 jmp 5000b
18222
18223 .previous
18224
18225 + pushl %ss
18226 + CFI_ADJUST_CFA_OFFSET 4
18227 + popl %ds
18228 + CFI_ADJUST_CFA_OFFSET -4
18229 + pushl %ss
18230 + CFI_ADJUST_CFA_OFFSET 4
18231 + popl %es
18232 + CFI_ADJUST_CFA_OFFSET -4
18233 popl %ebx
18234 CFI_ADJUST_CFA_OFFSET -4
18235 CFI_RESTORE ebx
18236 @@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
18237 CFI_ADJUST_CFA_OFFSET -4
18238 ret
18239 CFI_ENDPROC
18240 -ENDPROC(csum_partial_copy_generic)
18241 +ENDPROC(csum_partial_copy_generic_to_user)
18242
18243 #else
18244
18245 /* Version for PentiumII/PPro */
18246
18247 #define ROUND1(x) \
18248 + nop; nop; nop; \
18249 SRC(movl x(%esi), %ebx ) ; \
18250 addl %ebx, %eax ; \
18251 - DST(movl %ebx, x(%edi) ) ;
18252 + DST(movl %ebx, %es:x(%edi)) ;
18253
18254 #define ROUND(x) \
18255 + nop; nop; nop; \
18256 SRC(movl x(%esi), %ebx ) ; \
18257 adcl %ebx, %eax ; \
18258 - DST(movl %ebx, x(%edi) ) ;
18259 + DST(movl %ebx, %es:x(%edi)) ;
18260
18261 #define ARGBASE 12
18262 -
18263 -ENTRY(csum_partial_copy_generic)
18264 +
18265 +ENTRY(csum_partial_copy_generic_to_user)
18266 CFI_STARTPROC
18267 +
18268 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18269 + pushl %gs
18270 + CFI_ADJUST_CFA_OFFSET 4
18271 + popl %es
18272 + CFI_ADJUST_CFA_OFFSET -4
18273 + jmp csum_partial_copy_generic
18274 +#endif
18275 +
18276 +ENTRY(csum_partial_copy_generic_from_user)
18277 +
18278 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18279 + pushl %gs
18280 + CFI_ADJUST_CFA_OFFSET 4
18281 + popl %ds
18282 + CFI_ADJUST_CFA_OFFSET -4
18283 +#endif
18284 +
18285 +ENTRY(csum_partial_copy_generic)
18286 pushl %ebx
18287 CFI_ADJUST_CFA_OFFSET 4
18288 CFI_REL_OFFSET ebx, 0
18289 @@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
18290 subl %ebx, %edi
18291 lea -1(%esi),%edx
18292 andl $-32,%edx
18293 - lea 3f(%ebx,%ebx), %ebx
18294 + lea 3f(%ebx,%ebx,2), %ebx
18295 testl %esi, %esi
18296 jmp *%ebx
18297 1: addl $64,%esi
18298 @@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
18299 jb 5f
18300 SRC( movw (%esi), %dx )
18301 leal 2(%esi), %esi
18302 -DST( movw %dx, (%edi) )
18303 +DST( movw %dx, %es:(%edi) )
18304 leal 2(%edi), %edi
18305 je 6f
18306 shll $16,%edx
18307 5:
18308 SRC( movb (%esi), %dl )
18309 -DST( movb %dl, (%edi) )
18310 +DST( movb %dl, %es:(%edi) )
18311 6: addl %edx, %eax
18312 adcl $0, %eax
18313 7:
18314 .section .fixup, "ax"
18315 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
18316 - movl $-EFAULT, (%ebx)
18317 + movl $-EFAULT, %ss:(%ebx)
18318 # zero the complete destination (computing the rest is too much work)
18319 movl ARGBASE+8(%esp),%edi # dst
18320 movl ARGBASE+12(%esp),%ecx # len
18321 @@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
18322 rep; stosb
18323 jmp 7b
18324 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18325 - movl $-EFAULT, (%ebx)
18326 + movl $-EFAULT, %ss:(%ebx)
18327 jmp 7b
18328 .previous
18329
18330 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18331 + pushl %ss
18332 + CFI_ADJUST_CFA_OFFSET 4
18333 + popl %ds
18334 + CFI_ADJUST_CFA_OFFSET -4
18335 + pushl %ss
18336 + CFI_ADJUST_CFA_OFFSET 4
18337 + popl %es
18338 + CFI_ADJUST_CFA_OFFSET -4
18339 +#endif
18340 +
18341 popl %esi
18342 CFI_ADJUST_CFA_OFFSET -4
18343 CFI_RESTORE esi
18344 @@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
18345 CFI_RESTORE ebx
18346 ret
18347 CFI_ENDPROC
18348 -ENDPROC(csum_partial_copy_generic)
18349 +ENDPROC(csum_partial_copy_generic_to_user)
18350
18351 #undef ROUND
18352 #undef ROUND1
18353 diff -urNp linux-2.6.32.42/arch/x86/lib/clear_page_64.S linux-2.6.32.42/arch/x86/lib/clear_page_64.S
18354 --- linux-2.6.32.42/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
18355 +++ linux-2.6.32.42/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
18356 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
18357
18358 #include <asm/cpufeature.h>
18359
18360 - .section .altinstr_replacement,"ax"
18361 + .section .altinstr_replacement,"a"
18362 1: .byte 0xeb /* jmp <disp8> */
18363 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
18364 2:
18365 diff -urNp linux-2.6.32.42/arch/x86/lib/copy_page_64.S linux-2.6.32.42/arch/x86/lib/copy_page_64.S
18366 --- linux-2.6.32.42/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
18367 +++ linux-2.6.32.42/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
18368 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
18369
18370 #include <asm/cpufeature.h>
18371
18372 - .section .altinstr_replacement,"ax"
18373 + .section .altinstr_replacement,"a"
18374 1: .byte 0xeb /* jmp <disp8> */
18375 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
18376 2:
18377 diff -urNp linux-2.6.32.42/arch/x86/lib/copy_user_64.S linux-2.6.32.42/arch/x86/lib/copy_user_64.S
18378 --- linux-2.6.32.42/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
18379 +++ linux-2.6.32.42/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
18380 @@ -15,13 +15,14 @@
18381 #include <asm/asm-offsets.h>
18382 #include <asm/thread_info.h>
18383 #include <asm/cpufeature.h>
18384 +#include <asm/pgtable.h>
18385
18386 .macro ALTERNATIVE_JUMP feature,orig,alt
18387 0:
18388 .byte 0xe9 /* 32bit jump */
18389 .long \orig-1f /* by default jump to orig */
18390 1:
18391 - .section .altinstr_replacement,"ax"
18392 + .section .altinstr_replacement,"a"
18393 2: .byte 0xe9 /* near jump with 32bit immediate */
18394 .long \alt-1b /* offset */ /* or alternatively to alt */
18395 .previous
18396 @@ -64,49 +65,19 @@
18397 #endif
18398 .endm
18399
18400 -/* Standard copy_to_user with segment limit checking */
18401 -ENTRY(copy_to_user)
18402 - CFI_STARTPROC
18403 - GET_THREAD_INFO(%rax)
18404 - movq %rdi,%rcx
18405 - addq %rdx,%rcx
18406 - jc bad_to_user
18407 - cmpq TI_addr_limit(%rax),%rcx
18408 - ja bad_to_user
18409 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18410 - CFI_ENDPROC
18411 -ENDPROC(copy_to_user)
18412 -
18413 -/* Standard copy_from_user with segment limit checking */
18414 -ENTRY(copy_from_user)
18415 - CFI_STARTPROC
18416 - GET_THREAD_INFO(%rax)
18417 - movq %rsi,%rcx
18418 - addq %rdx,%rcx
18419 - jc bad_from_user
18420 - cmpq TI_addr_limit(%rax),%rcx
18421 - ja bad_from_user
18422 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18423 - CFI_ENDPROC
18424 -ENDPROC(copy_from_user)
18425 -
18426 ENTRY(copy_user_generic)
18427 CFI_STARTPROC
18428 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18429 CFI_ENDPROC
18430 ENDPROC(copy_user_generic)
18431
18432 -ENTRY(__copy_from_user_inatomic)
18433 - CFI_STARTPROC
18434 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
18435 - CFI_ENDPROC
18436 -ENDPROC(__copy_from_user_inatomic)
18437 -
18438 .section .fixup,"ax"
18439 /* must zero dest */
18440 ENTRY(bad_from_user)
18441 bad_from_user:
18442 CFI_STARTPROC
18443 + testl %edx,%edx
18444 + js bad_to_user
18445 movl %edx,%ecx
18446 xorl %eax,%eax
18447 rep
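Besides dropping the copy_{to,from}_user entry points (their exports also go away in the x8664_ksyms_64.c hunk above), the fixup path gains a sign test so a "negative" length is refused instead of zeroing an enormous destination. The added two instructions rendered in C, roughly, assuming the same fixup semantics:

static unsigned long bad_from_user_fixup(void *dst, unsigned long len)
{
	if ((long)len < 0)	/* the new "testl %edx,%edx; js bad_to_user" */
		return len;	/* refuse: do not zero a bogus range */
	memset(dst, 0, len);	/* zero the unread tail, as before */
	return len;
}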
18448 diff -urNp linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S
18449 --- linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
18450 +++ linux-2.6.32.42/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
18451 @@ -14,6 +14,7 @@
18452 #include <asm/current.h>
18453 #include <asm/asm-offsets.h>
18454 #include <asm/thread_info.h>
18455 +#include <asm/pgtable.h>
18456
18457 .macro ALIGN_DESTINATION
18458 #ifdef FIX_ALIGNMENT
18459 @@ -50,6 +51,15 @@
18460 */
18461 ENTRY(__copy_user_nocache)
18462 CFI_STARTPROC
18463 +
18464 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18465 + mov $PAX_USER_SHADOW_BASE,%rcx
18466 + cmp %rcx,%rsi
18467 + jae 1f
18468 + add %rcx,%rsi
18469 +1:
18470 +#endif
18471 +
18472 cmpl $8,%edx
18473 jb 20f /* less then 8 bytes, go to byte copy loop */
18474 ALIGN_DESTINATION
18475 diff -urNp linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c
18476 --- linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
18477 +++ linux-2.6.32.42/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
18478 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
18479 len -= 2;
18480 }
18481 }
18482 +
18483 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18484 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18485 + src += PAX_USER_SHADOW_BASE;
18486 +#endif
18487 +
18488 isum = csum_partial_copy_generic((__force const void *)src,
18489 dst, len, isum, errp, NULL);
18490 if (unlikely(*errp))
18491 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
18492 }
18493
18494 *errp = 0;
18495 +
18496 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18497 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
18498 + dst += PAX_USER_SHADOW_BASE;
18499 +#endif
18500 +
18501 return csum_partial_copy_generic(src, (void __force *)dst,
18502 len, isum, NULL, errp);
18503 }
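On amd64, UDEREF accesses userland through a shifted shadow mapping, so the checksum wrappers above (and the uaccess stubs that follow) relocate any pointer below PAX_USER_SHADOW_BASE before dereferencing it. The adjustment factored out as a helper (illustrative; the patch open-codes it at each site):

#ifdef CONFIG_PAX_MEMORY_UDEREF
static inline const void __user *pax_shadow_ptr(const void __user *ptr)
{
	if ((unsigned long)ptr < PAX_USER_SHADOW_BASE)
		ptr += PAX_USER_SHADOW_BASE;
	return ptr;
}
#endif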
18504 diff -urNp linux-2.6.32.42/arch/x86/lib/getuser.S linux-2.6.32.42/arch/x86/lib/getuser.S
18505 --- linux-2.6.32.42/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
18506 +++ linux-2.6.32.42/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
18507 @@ -33,14 +33,35 @@
18508 #include <asm/asm-offsets.h>
18509 #include <asm/thread_info.h>
18510 #include <asm/asm.h>
18511 +#include <asm/segment.h>
18512 +#include <asm/pgtable.h>
18513 +
18514 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18515 +#define __copyuser_seg gs;
18516 +#else
18517 +#define __copyuser_seg
18518 +#endif
18519
18520 .text
18521 ENTRY(__get_user_1)
18522 CFI_STARTPROC
18523 +
18524 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18525 GET_THREAD_INFO(%_ASM_DX)
18526 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18527 jae bad_get_user
18528 -1: movzb (%_ASM_AX),%edx
18529 +
18530 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18531 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18532 + cmp %_ASM_DX,%_ASM_AX
18533 + jae 1234f
18534 + add %_ASM_DX,%_ASM_AX
18535 +1234:
18536 +#endif
18537 +
18538 +#endif
18539 +
18540 +1: __copyuser_seg movzb (%_ASM_AX),%edx
18541 xor %eax,%eax
18542 ret
18543 CFI_ENDPROC
18544 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
18545 ENTRY(__get_user_2)
18546 CFI_STARTPROC
18547 add $1,%_ASM_AX
18548 +
18549 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18550 jc bad_get_user
18551 GET_THREAD_INFO(%_ASM_DX)
18552 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18553 jae bad_get_user
18554 -2: movzwl -1(%_ASM_AX),%edx
18555 +
18556 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18557 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18558 + cmp %_ASM_DX,%_ASM_AX
18559 + jae 1234f
18560 + add %_ASM_DX,%_ASM_AX
18561 +1234:
18562 +#endif
18563 +
18564 +#endif
18565 +
18566 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
18567 xor %eax,%eax
18568 ret
18569 CFI_ENDPROC
18570 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
18571 ENTRY(__get_user_4)
18572 CFI_STARTPROC
18573 add $3,%_ASM_AX
18574 +
18575 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18576 jc bad_get_user
18577 GET_THREAD_INFO(%_ASM_DX)
18578 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18579 jae bad_get_user
18580 -3: mov -3(%_ASM_AX),%edx
18581 +
18582 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18583 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18584 + cmp %_ASM_DX,%_ASM_AX
18585 + jae 1234f
18586 + add %_ASM_DX,%_ASM_AX
18587 +1234:
18588 +#endif
18589 +
18590 +#endif
18591 +
18592 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
18593 xor %eax,%eax
18594 ret
18595 CFI_ENDPROC
18596 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
18597 GET_THREAD_INFO(%_ASM_DX)
18598 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
18599 jae bad_get_user
18600 +
18601 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18602 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
18603 + cmp %_ASM_DX,%_ASM_AX
18604 + jae 1234f
18605 + add %_ASM_DX,%_ASM_AX
18606 +1234:
18607 +#endif
18608 +
18609 4: movq -7(%_ASM_AX),%_ASM_DX
18610 xor %eax,%eax
18611 ret
18612 diff -urNp linux-2.6.32.42/arch/x86/lib/memcpy_64.S linux-2.6.32.42/arch/x86/lib/memcpy_64.S
18613 --- linux-2.6.32.42/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
18614 +++ linux-2.6.32.42/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
18615 @@ -128,7 +128,7 @@ ENDPROC(__memcpy)
18616 * It is also a lot simpler. Use this when possible:
18617 */
18618
18619 - .section .altinstr_replacement, "ax"
18620 + .section .altinstr_replacement, "a"
18621 1: .byte 0xeb /* jmp <disp8> */
18622 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
18623 2:
18624 diff -urNp linux-2.6.32.42/arch/x86/lib/memset_64.S linux-2.6.32.42/arch/x86/lib/memset_64.S
18625 --- linux-2.6.32.42/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
18626 +++ linux-2.6.32.42/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
18627 @@ -118,7 +118,7 @@ ENDPROC(__memset)
18628
18629 #include <asm/cpufeature.h>
18630
18631 - .section .altinstr_replacement,"ax"
18632 + .section .altinstr_replacement,"a"
18633 1: .byte 0xeb /* jmp <disp8> */
18634 .byte (memset_c - memset) - (2f - 1b) /* offset */
18635 2:
18636 diff -urNp linux-2.6.32.42/arch/x86/lib/mmx_32.c linux-2.6.32.42/arch/x86/lib/mmx_32.c
18637 --- linux-2.6.32.42/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
18638 +++ linux-2.6.32.42/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
18639 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
18640 {
18641 void *p;
18642 int i;
18643 + unsigned long cr0;
18644
18645 if (unlikely(in_interrupt()))
18646 return __memcpy(to, from, len);
18647 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
18648 kernel_fpu_begin();
18649
18650 __asm__ __volatile__ (
18651 - "1: prefetch (%0)\n" /* This set is 28 bytes */
18652 - " prefetch 64(%0)\n"
18653 - " prefetch 128(%0)\n"
18654 - " prefetch 192(%0)\n"
18655 - " prefetch 256(%0)\n"
18656 + "1: prefetch (%1)\n" /* This set is 28 bytes */
18657 + " prefetch 64(%1)\n"
18658 + " prefetch 128(%1)\n"
18659 + " prefetch 192(%1)\n"
18660 + " prefetch 256(%1)\n"
18661 "2: \n"
18662 ".section .fixup, \"ax\"\n"
18663 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18664 + "3: \n"
18665 +
18666 +#ifdef CONFIG_PAX_KERNEXEC
18667 + " movl %%cr0, %0\n"
18668 + " movl %0, %%eax\n"
18669 + " andl $0xFFFEFFFF, %%eax\n"
18670 + " movl %%eax, %%cr0\n"
18671 +#endif
18672 +
18673 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18674 +
18675 +#ifdef CONFIG_PAX_KERNEXEC
18676 + " movl %0, %%cr0\n"
18677 +#endif
18678 +
18679 " jmp 2b\n"
18680 ".previous\n"
18681 _ASM_EXTABLE(1b, 3b)
18682 - : : "r" (from));
18683 + : "=&r" (cr0) : "r" (from) : "ax");
18684
18685 for ( ; i > 5; i--) {
18686 __asm__ __volatile__ (
18687 - "1: prefetch 320(%0)\n"
18688 - "2: movq (%0), %%mm0\n"
18689 - " movq 8(%0), %%mm1\n"
18690 - " movq 16(%0), %%mm2\n"
18691 - " movq 24(%0), %%mm3\n"
18692 - " movq %%mm0, (%1)\n"
18693 - " movq %%mm1, 8(%1)\n"
18694 - " movq %%mm2, 16(%1)\n"
18695 - " movq %%mm3, 24(%1)\n"
18696 - " movq 32(%0), %%mm0\n"
18697 - " movq 40(%0), %%mm1\n"
18698 - " movq 48(%0), %%mm2\n"
18699 - " movq 56(%0), %%mm3\n"
18700 - " movq %%mm0, 32(%1)\n"
18701 - " movq %%mm1, 40(%1)\n"
18702 - " movq %%mm2, 48(%1)\n"
18703 - " movq %%mm3, 56(%1)\n"
18704 + "1: prefetch 320(%1)\n"
18705 + "2: movq (%1), %%mm0\n"
18706 + " movq 8(%1), %%mm1\n"
18707 + " movq 16(%1), %%mm2\n"
18708 + " movq 24(%1), %%mm3\n"
18709 + " movq %%mm0, (%2)\n"
18710 + " movq %%mm1, 8(%2)\n"
18711 + " movq %%mm2, 16(%2)\n"
18712 + " movq %%mm3, 24(%2)\n"
18713 + " movq 32(%1), %%mm0\n"
18714 + " movq 40(%1), %%mm1\n"
18715 + " movq 48(%1), %%mm2\n"
18716 + " movq 56(%1), %%mm3\n"
18717 + " movq %%mm0, 32(%2)\n"
18718 + " movq %%mm1, 40(%2)\n"
18719 + " movq %%mm2, 48(%2)\n"
18720 + " movq %%mm3, 56(%2)\n"
18721 ".section .fixup, \"ax\"\n"
18722 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18723 + "3:\n"
18724 +
18725 +#ifdef CONFIG_PAX_KERNEXEC
18726 + " movl %%cr0, %0\n"
18727 + " movl %0, %%eax\n"
18728 + " andl $0xFFFEFFFF, %%eax\n"
18729 + " movl %%eax, %%cr0\n"
18730 +#endif
18731 +
18732 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18733 +
18734 +#ifdef CONFIG_PAX_KERNEXEC
18735 + " movl %0, %%cr0\n"
18736 +#endif
18737 +
18738 " jmp 2b\n"
18739 ".previous\n"
18740 _ASM_EXTABLE(1b, 3b)
18741 - : : "r" (from), "r" (to) : "memory");
18742 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18743
18744 from += 64;
18745 to += 64;
18746 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
18747 static void fast_copy_page(void *to, void *from)
18748 {
18749 int i;
18750 + unsigned long cr0;
18751
18752 kernel_fpu_begin();
18753
18754 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
18755 * but that is for later. -AV
18756 */
18757 __asm__ __volatile__(
18758 - "1: prefetch (%0)\n"
18759 - " prefetch 64(%0)\n"
18760 - " prefetch 128(%0)\n"
18761 - " prefetch 192(%0)\n"
18762 - " prefetch 256(%0)\n"
18763 + "1: prefetch (%1)\n"
18764 + " prefetch 64(%1)\n"
18765 + " prefetch 128(%1)\n"
18766 + " prefetch 192(%1)\n"
18767 + " prefetch 256(%1)\n"
18768 "2: \n"
18769 ".section .fixup, \"ax\"\n"
18770 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18771 + "3: \n"
18772 +
18773 +#ifdef CONFIG_PAX_KERNEXEC
18774 + " movl %%cr0, %0\n"
18775 + " movl %0, %%eax\n"
18776 + " andl $0xFFFEFFFF, %%eax\n"
18777 + " movl %%eax, %%cr0\n"
18778 +#endif
18779 +
18780 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18781 +
18782 +#ifdef CONFIG_PAX_KERNEXEC
18783 + " movl %0, %%cr0\n"
18784 +#endif
18785 +
18786 " jmp 2b\n"
18787 ".previous\n"
18788 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
18789 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18790
18791 for (i = 0; i < (4096-320)/64; i++) {
18792 __asm__ __volatile__ (
18793 - "1: prefetch 320(%0)\n"
18794 - "2: movq (%0), %%mm0\n"
18795 - " movntq %%mm0, (%1)\n"
18796 - " movq 8(%0), %%mm1\n"
18797 - " movntq %%mm1, 8(%1)\n"
18798 - " movq 16(%0), %%mm2\n"
18799 - " movntq %%mm2, 16(%1)\n"
18800 - " movq 24(%0), %%mm3\n"
18801 - " movntq %%mm3, 24(%1)\n"
18802 - " movq 32(%0), %%mm4\n"
18803 - " movntq %%mm4, 32(%1)\n"
18804 - " movq 40(%0), %%mm5\n"
18805 - " movntq %%mm5, 40(%1)\n"
18806 - " movq 48(%0), %%mm6\n"
18807 - " movntq %%mm6, 48(%1)\n"
18808 - " movq 56(%0), %%mm7\n"
18809 - " movntq %%mm7, 56(%1)\n"
18810 + "1: prefetch 320(%1)\n"
18811 + "2: movq (%1), %%mm0\n"
18812 + " movntq %%mm0, (%2)\n"
18813 + " movq 8(%1), %%mm1\n"
18814 + " movntq %%mm1, 8(%2)\n"
18815 + " movq 16(%1), %%mm2\n"
18816 + " movntq %%mm2, 16(%2)\n"
18817 + " movq 24(%1), %%mm3\n"
18818 + " movntq %%mm3, 24(%2)\n"
18819 + " movq 32(%1), %%mm4\n"
18820 + " movntq %%mm4, 32(%2)\n"
18821 + " movq 40(%1), %%mm5\n"
18822 + " movntq %%mm5, 40(%2)\n"
18823 + " movq 48(%1), %%mm6\n"
18824 + " movntq %%mm6, 48(%2)\n"
18825 + " movq 56(%1), %%mm7\n"
18826 + " movntq %%mm7, 56(%2)\n"
18827 ".section .fixup, \"ax\"\n"
18828 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18829 + "3:\n"
18830 +
18831 +#ifdef CONFIG_PAX_KERNEXEC
18832 + " movl %%cr0, %0\n"
18833 + " movl %0, %%eax\n"
18834 + " andl $0xFFFEFFFF, %%eax\n"
18835 + " movl %%eax, %%cr0\n"
18836 +#endif
18837 +
18838 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18839 +
18840 +#ifdef CONFIG_PAX_KERNEXEC
18841 + " movl %0, %%cr0\n"
18842 +#endif
18843 +
18844 " jmp 2b\n"
18845 ".previous\n"
18846 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
18847 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18848
18849 from += 64;
18850 to += 64;
18851 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
18852 static void fast_copy_page(void *to, void *from)
18853 {
18854 int i;
18855 + unsigned long cr0;
18856
18857 kernel_fpu_begin();
18858
18859 __asm__ __volatile__ (
18860 - "1: prefetch (%0)\n"
18861 - " prefetch 64(%0)\n"
18862 - " prefetch 128(%0)\n"
18863 - " prefetch 192(%0)\n"
18864 - " prefetch 256(%0)\n"
18865 + "1: prefetch (%1)\n"
18866 + " prefetch 64(%1)\n"
18867 + " prefetch 128(%1)\n"
18868 + " prefetch 192(%1)\n"
18869 + " prefetch 256(%1)\n"
18870 "2: \n"
18871 ".section .fixup, \"ax\"\n"
18872 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18873 + "3: \n"
18874 +
18875 +#ifdef CONFIG_PAX_KERNEXEC
18876 + " movl %%cr0, %0\n"
18877 + " movl %0, %%eax\n"
18878 + " andl $0xFFFEFFFF, %%eax\n"
18879 + " movl %%eax, %%cr0\n"
18880 +#endif
18881 +
18882 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
18883 +
18884 +#ifdef CONFIG_PAX_KERNEXEC
18885 + " movl %0, %%cr0\n"
18886 +#endif
18887 +
18888 " jmp 2b\n"
18889 ".previous\n"
18890 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
18891 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
18892
18893 for (i = 0; i < 4096/64; i++) {
18894 __asm__ __volatile__ (
18895 - "1: prefetch 320(%0)\n"
18896 - "2: movq (%0), %%mm0\n"
18897 - " movq 8(%0), %%mm1\n"
18898 - " movq 16(%0), %%mm2\n"
18899 - " movq 24(%0), %%mm3\n"
18900 - " movq %%mm0, (%1)\n"
18901 - " movq %%mm1, 8(%1)\n"
18902 - " movq %%mm2, 16(%1)\n"
18903 - " movq %%mm3, 24(%1)\n"
18904 - " movq 32(%0), %%mm0\n"
18905 - " movq 40(%0), %%mm1\n"
18906 - " movq 48(%0), %%mm2\n"
18907 - " movq 56(%0), %%mm3\n"
18908 - " movq %%mm0, 32(%1)\n"
18909 - " movq %%mm1, 40(%1)\n"
18910 - " movq %%mm2, 48(%1)\n"
18911 - " movq %%mm3, 56(%1)\n"
18912 + "1: prefetch 320(%1)\n"
18913 + "2: movq (%1), %%mm0\n"
18914 + " movq 8(%1), %%mm1\n"
18915 + " movq 16(%1), %%mm2\n"
18916 + " movq 24(%1), %%mm3\n"
18917 + " movq %%mm0, (%2)\n"
18918 + " movq %%mm1, 8(%2)\n"
18919 + " movq %%mm2, 16(%2)\n"
18920 + " movq %%mm3, 24(%2)\n"
18921 + " movq 32(%1), %%mm0\n"
18922 + " movq 40(%1), %%mm1\n"
18923 + " movq 48(%1), %%mm2\n"
18924 + " movq 56(%1), %%mm3\n"
18925 + " movq %%mm0, 32(%2)\n"
18926 + " movq %%mm1, 40(%2)\n"
18927 + " movq %%mm2, 48(%2)\n"
18928 + " movq %%mm3, 56(%2)\n"
18929 ".section .fixup, \"ax\"\n"
18930 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18931 + "3:\n"
18932 +
18933 +#ifdef CONFIG_PAX_KERNEXEC
18934 + " movl %%cr0, %0\n"
18935 + " movl %0, %%eax\n"
18936 + " andl $0xFFFEFFFF, %%eax\n"
18937 + " movl %%eax, %%cr0\n"
18938 +#endif
18939 +
18940 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
18941 +
18942 +#ifdef CONFIG_PAX_KERNEXEC
18943 + " movl %0, %%cr0\n"
18944 +#endif
18945 +
18946 " jmp 2b\n"
18947 ".previous\n"
18948 _ASM_EXTABLE(1b, 3b)
18949 - : : "r" (from), "r" (to) : "memory");
18950 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
18951
18952 from += 64;
18953 to += 64;
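The KERNEXEC blocks inserted above toggle CR0.WP (bit 16, hence the andl $0xFFFEFFFF) around the instruction that rewrites the faulting prefetch, since under KERNEXEC that code sits in read-only pages. The open/close pair in plain C, as a rough sketch (names suffixed _sketch to avoid implying the patch's own helpers look exactly like this):

static inline unsigned long kernexec_open_sketch(void)
{
	unsigned long cr0 = read_cr0();

	write_cr0(cr0 & ~X86_CR0_WP);	/* allow writes to read-only kernel pages */
	return cr0;
}

static inline void kernexec_close_sketch(unsigned long cr0)
{
	write_cr0(cr0);			/* restore CR0.WP */
}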
18954 diff -urNp linux-2.6.32.42/arch/x86/lib/putuser.S linux-2.6.32.42/arch/x86/lib/putuser.S
18955 --- linux-2.6.32.42/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
18956 +++ linux-2.6.32.42/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
18957 @@ -15,7 +15,8 @@
18958 #include <asm/thread_info.h>
18959 #include <asm/errno.h>
18960 #include <asm/asm.h>
18961 -
18962 +#include <asm/segment.h>
18963 +#include <asm/pgtable.h>
18964
18965 /*
18966 * __put_user_X
18967 @@ -29,52 +30,119 @@
18968 * as they get called from within inline assembly.
18969 */
18970
18971 -#define ENTER CFI_STARTPROC ; \
18972 - GET_THREAD_INFO(%_ASM_BX)
18973 +#define ENTER CFI_STARTPROC
18974 #define EXIT ret ; \
18975 CFI_ENDPROC
18976
18977 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18978 +#define _DEST %_ASM_CX,%_ASM_BX
18979 +#else
18980 +#define _DEST %_ASM_CX
18981 +#endif
18982 +
18983 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18984 +#define __copyuser_seg gs;
18985 +#else
18986 +#define __copyuser_seg
18987 +#endif
18988 +
18989 .text
18990 ENTRY(__put_user_1)
18991 ENTER
18992 +
18993 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
18994 + GET_THREAD_INFO(%_ASM_BX)
18995 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
18996 jae bad_put_user
18997 -1: movb %al,(%_ASM_CX)
18998 +
18999 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19000 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19001 + cmp %_ASM_BX,%_ASM_CX
19002 + jb 1234f
19003 + xor %ebx,%ebx
19004 +1234:
19005 +#endif
19006 +
19007 +#endif
19008 +
19009 +1: __copyuser_seg movb %al,(_DEST)
19010 xor %eax,%eax
19011 EXIT
19012 ENDPROC(__put_user_1)
19013
19014 ENTRY(__put_user_2)
19015 ENTER
19016 +
19017 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19018 + GET_THREAD_INFO(%_ASM_BX)
19019 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19020 sub $1,%_ASM_BX
19021 cmp %_ASM_BX,%_ASM_CX
19022 jae bad_put_user
19023 -2: movw %ax,(%_ASM_CX)
19024 +
19025 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19026 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19027 + cmp %_ASM_BX,%_ASM_CX
19028 + jb 1234f
19029 + xor %ebx,%ebx
19030 +1234:
19031 +#endif
19032 +
19033 +#endif
19034 +
19035 +2: __copyuser_seg movw %ax,(_DEST)
19036 xor %eax,%eax
19037 EXIT
19038 ENDPROC(__put_user_2)
19039
19040 ENTRY(__put_user_4)
19041 ENTER
19042 +
19043 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19044 + GET_THREAD_INFO(%_ASM_BX)
19045 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19046 sub $3,%_ASM_BX
19047 cmp %_ASM_BX,%_ASM_CX
19048 jae bad_put_user
19049 -3: movl %eax,(%_ASM_CX)
19050 +
19051 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19052 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19053 + cmp %_ASM_BX,%_ASM_CX
19054 + jb 1234f
19055 + xor %ebx,%ebx
19056 +1234:
19057 +#endif
19058 +
19059 +#endif
19060 +
19061 +3: __copyuser_seg movl %eax,(_DEST)
19062 xor %eax,%eax
19063 EXIT
19064 ENDPROC(__put_user_4)
19065
19066 ENTRY(__put_user_8)
19067 ENTER
19068 +
19069 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19070 + GET_THREAD_INFO(%_ASM_BX)
19071 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19072 sub $7,%_ASM_BX
19073 cmp %_ASM_BX,%_ASM_CX
19074 jae bad_put_user
19075 -4: mov %_ASM_AX,(%_ASM_CX)
19076 +
19077 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19078 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19079 + cmp %_ASM_BX,%_ASM_CX
19080 + jb 1234f
19081 + xor %ebx,%ebx
19082 +1234:
19083 +#endif
19084 +
19085 +#endif
19086 +
19087 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
19088 #ifdef CONFIG_X86_32
19089 -5: movl %edx,4(%_ASM_CX)
19090 +5: __copyuser_seg movl %edx,4(_DEST)
19091 #endif
19092 xor %eax,%eax
19093 EXIT
19094 diff -urNp linux-2.6.32.42/arch/x86/lib/usercopy_32.c linux-2.6.32.42/arch/x86/lib/usercopy_32.c
19095 --- linux-2.6.32.42/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
19096 +++ linux-2.6.32.42/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
19097 @@ -43,7 +43,7 @@ do { \
19098 __asm__ __volatile__( \
19099 " testl %1,%1\n" \
19100 " jz 2f\n" \
19101 - "0: lodsb\n" \
19102 + "0: "__copyuser_seg"lodsb\n" \
19103 " stosb\n" \
19104 " testb %%al,%%al\n" \
19105 " jz 1f\n" \
19106 @@ -128,10 +128,12 @@ do { \
19107 int __d0; \
19108 might_fault(); \
19109 __asm__ __volatile__( \
19110 + __COPYUSER_SET_ES \
19111 "0: rep; stosl\n" \
19112 " movl %2,%0\n" \
19113 "1: rep; stosb\n" \
19114 "2:\n" \
19115 + __COPYUSER_RESTORE_ES \
19116 ".section .fixup,\"ax\"\n" \
19117 "3: lea 0(%2,%0,4),%0\n" \
19118 " jmp 2b\n" \
19119 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19120 might_fault();
19121
19122 __asm__ __volatile__(
19123 + __COPYUSER_SET_ES
19124 " testl %0, %0\n"
19125 " jz 3f\n"
19126 " andl %0,%%ecx\n"
19127 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19128 " subl %%ecx,%0\n"
19129 " addl %0,%%eax\n"
19130 "1:\n"
19131 + __COPYUSER_RESTORE_ES
19132 ".section .fixup,\"ax\"\n"
19133 "2: xorl %%eax,%%eax\n"
19134 " jmp 1b\n"
19135 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19136
19137 #ifdef CONFIG_X86_INTEL_USERCOPY
19138 static unsigned long
19139 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
19140 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19141 {
19142 int d0, d1;
19143 __asm__ __volatile__(
19144 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19145 " .align 2,0x90\n"
19146 "3: movl 0(%4), %%eax\n"
19147 "4: movl 4(%4), %%edx\n"
19148 - "5: movl %%eax, 0(%3)\n"
19149 - "6: movl %%edx, 4(%3)\n"
19150 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19151 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19152 "7: movl 8(%4), %%eax\n"
19153 "8: movl 12(%4),%%edx\n"
19154 - "9: movl %%eax, 8(%3)\n"
19155 - "10: movl %%edx, 12(%3)\n"
19156 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19157 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19158 "11: movl 16(%4), %%eax\n"
19159 "12: movl 20(%4), %%edx\n"
19160 - "13: movl %%eax, 16(%3)\n"
19161 - "14: movl %%edx, 20(%3)\n"
19162 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19163 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19164 "15: movl 24(%4), %%eax\n"
19165 "16: movl 28(%4), %%edx\n"
19166 - "17: movl %%eax, 24(%3)\n"
19167 - "18: movl %%edx, 28(%3)\n"
19168 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19169 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19170 "19: movl 32(%4), %%eax\n"
19171 "20: movl 36(%4), %%edx\n"
19172 - "21: movl %%eax, 32(%3)\n"
19173 - "22: movl %%edx, 36(%3)\n"
19174 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19175 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19176 "23: movl 40(%4), %%eax\n"
19177 "24: movl 44(%4), %%edx\n"
19178 - "25: movl %%eax, 40(%3)\n"
19179 - "26: movl %%edx, 44(%3)\n"
19180 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19181 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19182 "27: movl 48(%4), %%eax\n"
19183 "28: movl 52(%4), %%edx\n"
19184 - "29: movl %%eax, 48(%3)\n"
19185 - "30: movl %%edx, 52(%3)\n"
19186 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19187 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19188 "31: movl 56(%4), %%eax\n"
19189 "32: movl 60(%4), %%edx\n"
19190 - "33: movl %%eax, 56(%3)\n"
19191 - "34: movl %%edx, 60(%3)\n"
19192 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19193 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19194 " addl $-64, %0\n"
19195 " addl $64, %4\n"
19196 " addl $64, %3\n"
19197 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19198 " shrl $2, %0\n"
19199 " andl $3, %%eax\n"
19200 " cld\n"
19201 + __COPYUSER_SET_ES
19202 "99: rep; movsl\n"
19203 "36: movl %%eax, %0\n"
19204 "37: rep; movsb\n"
19205 "100:\n"
19206 + __COPYUSER_RESTORE_ES
19207 + ".section .fixup,\"ax\"\n"
19208 + "101: lea 0(%%eax,%0,4),%0\n"
19209 + " jmp 100b\n"
19210 + ".previous\n"
19211 + ".section __ex_table,\"a\"\n"
19212 + " .align 4\n"
19213 + " .long 1b,100b\n"
19214 + " .long 2b,100b\n"
19215 + " .long 3b,100b\n"
19216 + " .long 4b,100b\n"
19217 + " .long 5b,100b\n"
19218 + " .long 6b,100b\n"
19219 + " .long 7b,100b\n"
19220 + " .long 8b,100b\n"
19221 + " .long 9b,100b\n"
19222 + " .long 10b,100b\n"
19223 + " .long 11b,100b\n"
19224 + " .long 12b,100b\n"
19225 + " .long 13b,100b\n"
19226 + " .long 14b,100b\n"
19227 + " .long 15b,100b\n"
19228 + " .long 16b,100b\n"
19229 + " .long 17b,100b\n"
19230 + " .long 18b,100b\n"
19231 + " .long 19b,100b\n"
19232 + " .long 20b,100b\n"
19233 + " .long 21b,100b\n"
19234 + " .long 22b,100b\n"
19235 + " .long 23b,100b\n"
19236 + " .long 24b,100b\n"
19237 + " .long 25b,100b\n"
19238 + " .long 26b,100b\n"
19239 + " .long 27b,100b\n"
19240 + " .long 28b,100b\n"
19241 + " .long 29b,100b\n"
19242 + " .long 30b,100b\n"
19243 + " .long 31b,100b\n"
19244 + " .long 32b,100b\n"
19245 + " .long 33b,100b\n"
19246 + " .long 34b,100b\n"
19247 + " .long 35b,100b\n"
19248 + " .long 36b,100b\n"
19249 + " .long 37b,100b\n"
19250 + " .long 99b,101b\n"
19251 + ".previous"
19252 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
19253 + : "1"(to), "2"(from), "0"(size)
19254 + : "eax", "edx", "memory");
19255 + return size;
19256 +}
19257 +
19258 +static unsigned long
19259 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
19260 +{
19261 + int d0, d1;
19262 + __asm__ __volatile__(
19263 + " .align 2,0x90\n"
19264 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
19265 + " cmpl $67, %0\n"
19266 + " jbe 3f\n"
19267 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
19268 + " .align 2,0x90\n"
19269 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
19270 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
19271 + "5: movl %%eax, 0(%3)\n"
19272 + "6: movl %%edx, 4(%3)\n"
19273 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
19274 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
19275 + "9: movl %%eax, 8(%3)\n"
19276 + "10: movl %%edx, 12(%3)\n"
19277 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
19278 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
19279 + "13: movl %%eax, 16(%3)\n"
19280 + "14: movl %%edx, 20(%3)\n"
19281 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
19282 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
19283 + "17: movl %%eax, 24(%3)\n"
19284 + "18: movl %%edx, 28(%3)\n"
19285 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
19286 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
19287 + "21: movl %%eax, 32(%3)\n"
19288 + "22: movl %%edx, 36(%3)\n"
19289 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
19290 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
19291 + "25: movl %%eax, 40(%3)\n"
19292 + "26: movl %%edx, 44(%3)\n"
19293 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
19294 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
19295 + "29: movl %%eax, 48(%3)\n"
19296 + "30: movl %%edx, 52(%3)\n"
19297 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
19298 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
19299 + "33: movl %%eax, 56(%3)\n"
19300 + "34: movl %%edx, 60(%3)\n"
19301 + " addl $-64, %0\n"
19302 + " addl $64, %4\n"
19303 + " addl $64, %3\n"
19304 + " cmpl $63, %0\n"
19305 + " ja 1b\n"
19306 + "35: movl %0, %%eax\n"
19307 + " shrl $2, %0\n"
19308 + " andl $3, %%eax\n"
19309 + " cld\n"
19310 + "99: rep; "__copyuser_seg" movsl\n"
19311 + "36: movl %%eax, %0\n"
19312 + "37: rep; "__copyuser_seg" movsb\n"
19313 + "100:\n"
19314 ".section .fixup,\"ax\"\n"
19315 "101: lea 0(%%eax,%0,4),%0\n"
19316 " jmp 100b\n"
19317 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
19318 int d0, d1;
19319 __asm__ __volatile__(
19320 " .align 2,0x90\n"
19321 - "0: movl 32(%4), %%eax\n"
19322 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19323 " cmpl $67, %0\n"
19324 " jbe 2f\n"
19325 - "1: movl 64(%4), %%eax\n"
19326 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19327 " .align 2,0x90\n"
19328 - "2: movl 0(%4), %%eax\n"
19329 - "21: movl 4(%4), %%edx\n"
19330 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19331 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19332 " movl %%eax, 0(%3)\n"
19333 " movl %%edx, 4(%3)\n"
19334 - "3: movl 8(%4), %%eax\n"
19335 - "31: movl 12(%4),%%edx\n"
19336 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19337 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19338 " movl %%eax, 8(%3)\n"
19339 " movl %%edx, 12(%3)\n"
19340 - "4: movl 16(%4), %%eax\n"
19341 - "41: movl 20(%4), %%edx\n"
19342 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19343 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19344 " movl %%eax, 16(%3)\n"
19345 " movl %%edx, 20(%3)\n"
19346 - "10: movl 24(%4), %%eax\n"
19347 - "51: movl 28(%4), %%edx\n"
19348 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19349 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19350 " movl %%eax, 24(%3)\n"
19351 " movl %%edx, 28(%3)\n"
19352 - "11: movl 32(%4), %%eax\n"
19353 - "61: movl 36(%4), %%edx\n"
19354 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19355 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19356 " movl %%eax, 32(%3)\n"
19357 " movl %%edx, 36(%3)\n"
19358 - "12: movl 40(%4), %%eax\n"
19359 - "71: movl 44(%4), %%edx\n"
19360 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19361 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19362 " movl %%eax, 40(%3)\n"
19363 " movl %%edx, 44(%3)\n"
19364 - "13: movl 48(%4), %%eax\n"
19365 - "81: movl 52(%4), %%edx\n"
19366 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19367 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19368 " movl %%eax, 48(%3)\n"
19369 " movl %%edx, 52(%3)\n"
19370 - "14: movl 56(%4), %%eax\n"
19371 - "91: movl 60(%4), %%edx\n"
19372 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19373 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19374 " movl %%eax, 56(%3)\n"
19375 " movl %%edx, 60(%3)\n"
19376 " addl $-64, %0\n"
19377 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
19378 " shrl $2, %0\n"
19379 " andl $3, %%eax\n"
19380 " cld\n"
19381 - "6: rep; movsl\n"
19382 + "6: rep; "__copyuser_seg" movsl\n"
19383 " movl %%eax,%0\n"
19384 - "7: rep; movsb\n"
19385 + "7: rep; "__copyuser_seg" movsb\n"
19386 "8:\n"
19387 ".section .fixup,\"ax\"\n"
19388 "9: lea 0(%%eax,%0,4),%0\n"
19389 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
19390
19391 __asm__ __volatile__(
19392 " .align 2,0x90\n"
19393 - "0: movl 32(%4), %%eax\n"
19394 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19395 " cmpl $67, %0\n"
19396 " jbe 2f\n"
19397 - "1: movl 64(%4), %%eax\n"
19398 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19399 " .align 2,0x90\n"
19400 - "2: movl 0(%4), %%eax\n"
19401 - "21: movl 4(%4), %%edx\n"
19402 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19403 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19404 " movnti %%eax, 0(%3)\n"
19405 " movnti %%edx, 4(%3)\n"
19406 - "3: movl 8(%4), %%eax\n"
19407 - "31: movl 12(%4),%%edx\n"
19408 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19409 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19410 " movnti %%eax, 8(%3)\n"
19411 " movnti %%edx, 12(%3)\n"
19412 - "4: movl 16(%4), %%eax\n"
19413 - "41: movl 20(%4), %%edx\n"
19414 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19415 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19416 " movnti %%eax, 16(%3)\n"
19417 " movnti %%edx, 20(%3)\n"
19418 - "10: movl 24(%4), %%eax\n"
19419 - "51: movl 28(%4), %%edx\n"
19420 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19421 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19422 " movnti %%eax, 24(%3)\n"
19423 " movnti %%edx, 28(%3)\n"
19424 - "11: movl 32(%4), %%eax\n"
19425 - "61: movl 36(%4), %%edx\n"
19426 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19427 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19428 " movnti %%eax, 32(%3)\n"
19429 " movnti %%edx, 36(%3)\n"
19430 - "12: movl 40(%4), %%eax\n"
19431 - "71: movl 44(%4), %%edx\n"
19432 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19433 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19434 " movnti %%eax, 40(%3)\n"
19435 " movnti %%edx, 44(%3)\n"
19436 - "13: movl 48(%4), %%eax\n"
19437 - "81: movl 52(%4), %%edx\n"
19438 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19439 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19440 " movnti %%eax, 48(%3)\n"
19441 " movnti %%edx, 52(%3)\n"
19442 - "14: movl 56(%4), %%eax\n"
19443 - "91: movl 60(%4), %%edx\n"
19444 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19445 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19446 " movnti %%eax, 56(%3)\n"
19447 " movnti %%edx, 60(%3)\n"
19448 " addl $-64, %0\n"
19449 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
19450 " shrl $2, %0\n"
19451 " andl $3, %%eax\n"
19452 " cld\n"
19453 - "6: rep; movsl\n"
19454 + "6: rep; "__copyuser_seg" movsl\n"
19455 " movl %%eax,%0\n"
19456 - "7: rep; movsb\n"
19457 + "7: rep; "__copyuser_seg" movsb\n"
19458 "8:\n"
19459 ".section .fixup,\"ax\"\n"
19460 "9: lea 0(%%eax,%0,4),%0\n"
19461 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
19462
19463 __asm__ __volatile__(
19464 " .align 2,0x90\n"
19465 - "0: movl 32(%4), %%eax\n"
19466 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19467 " cmpl $67, %0\n"
19468 " jbe 2f\n"
19469 - "1: movl 64(%4), %%eax\n"
19470 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19471 " .align 2,0x90\n"
19472 - "2: movl 0(%4), %%eax\n"
19473 - "21: movl 4(%4), %%edx\n"
19474 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19475 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19476 " movnti %%eax, 0(%3)\n"
19477 " movnti %%edx, 4(%3)\n"
19478 - "3: movl 8(%4), %%eax\n"
19479 - "31: movl 12(%4),%%edx\n"
19480 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19481 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19482 " movnti %%eax, 8(%3)\n"
19483 " movnti %%edx, 12(%3)\n"
19484 - "4: movl 16(%4), %%eax\n"
19485 - "41: movl 20(%4), %%edx\n"
19486 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19487 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19488 " movnti %%eax, 16(%3)\n"
19489 " movnti %%edx, 20(%3)\n"
19490 - "10: movl 24(%4), %%eax\n"
19491 - "51: movl 28(%4), %%edx\n"
19492 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19493 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19494 " movnti %%eax, 24(%3)\n"
19495 " movnti %%edx, 28(%3)\n"
19496 - "11: movl 32(%4), %%eax\n"
19497 - "61: movl 36(%4), %%edx\n"
19498 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19499 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19500 " movnti %%eax, 32(%3)\n"
19501 " movnti %%edx, 36(%3)\n"
19502 - "12: movl 40(%4), %%eax\n"
19503 - "71: movl 44(%4), %%edx\n"
19504 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19505 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19506 " movnti %%eax, 40(%3)\n"
19507 " movnti %%edx, 44(%3)\n"
19508 - "13: movl 48(%4), %%eax\n"
19509 - "81: movl 52(%4), %%edx\n"
19510 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19511 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19512 " movnti %%eax, 48(%3)\n"
19513 " movnti %%edx, 52(%3)\n"
19514 - "14: movl 56(%4), %%eax\n"
19515 - "91: movl 60(%4), %%edx\n"
19516 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19517 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19518 " movnti %%eax, 56(%3)\n"
19519 " movnti %%edx, 60(%3)\n"
19520 " addl $-64, %0\n"
19521 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
19522 " shrl $2, %0\n"
19523 " andl $3, %%eax\n"
19524 " cld\n"
19525 - "6: rep; movsl\n"
19526 + "6: rep; "__copyuser_seg" movsl\n"
19527 " movl %%eax,%0\n"
19528 - "7: rep; movsb\n"
19529 + "7: rep; "__copyuser_seg" movsb\n"
19530 "8:\n"
19531 ".section .fixup,\"ax\"\n"
19532 "9: lea 0(%%eax,%0,4),%0\n"
19533 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
19534 */
19535 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
19536 unsigned long size);
19537 -unsigned long __copy_user_intel(void __user *to, const void *from,
19538 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
19539 + unsigned long size);
19540 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
19541 unsigned long size);
19542 unsigned long __copy_user_zeroing_intel_nocache(void *to,
19543 const void __user *from, unsigned long size);
19544 #endif /* CONFIG_X86_INTEL_USERCOPY */
19545
19546 /* Generic arbitrary sized copy. */
19547 -#define __copy_user(to, from, size) \
19548 +#define __copy_user(to, from, size, prefix, set, restore) \
19549 do { \
19550 int __d0, __d1, __d2; \
19551 __asm__ __volatile__( \
19552 + set \
19553 " cmp $7,%0\n" \
19554 " jbe 1f\n" \
19555 " movl %1,%0\n" \
19556 " negl %0\n" \
19557 " andl $7,%0\n" \
19558 " subl %0,%3\n" \
19559 - "4: rep; movsb\n" \
19560 + "4: rep; "prefix"movsb\n" \
19561 " movl %3,%0\n" \
19562 " shrl $2,%0\n" \
19563 " andl $3,%3\n" \
19564 " .align 2,0x90\n" \
19565 - "0: rep; movsl\n" \
19566 + "0: rep; "prefix"movsl\n" \
19567 " movl %3,%0\n" \
19568 - "1: rep; movsb\n" \
19569 + "1: rep; "prefix"movsb\n" \
19570 "2:\n" \
19571 + restore \
19572 ".section .fixup,\"ax\"\n" \
19573 "5: addl %3,%0\n" \
19574 " jmp 2b\n" \
19575 @@ -682,14 +799,14 @@ do { \
19576 " negl %0\n" \
19577 " andl $7,%0\n" \
19578 " subl %0,%3\n" \
19579 - "4: rep; movsb\n" \
19580 + "4: rep; "__copyuser_seg"movsb\n" \
19581 " movl %3,%0\n" \
19582 " shrl $2,%0\n" \
19583 " andl $3,%3\n" \
19584 " .align 2,0x90\n" \
19585 - "0: rep; movsl\n" \
19586 + "0: rep; "__copyuser_seg"movsl\n" \
19587 " movl %3,%0\n" \
19588 - "1: rep; movsb\n" \
19589 + "1: rep; "__copyuser_seg"movsb\n" \
19590 "2:\n" \
19591 ".section .fixup,\"ax\"\n" \
19592 "5: addl %3,%0\n" \
19593 @@ -775,9 +892,9 @@ survive:
19594 }
19595 #endif
19596 if (movsl_is_ok(to, from, n))
19597 - __copy_user(to, from, n);
19598 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
19599 else
19600 - n = __copy_user_intel(to, from, n);
19601 + n = __generic_copy_to_user_intel(to, from, n);
19602 return n;
19603 }
19604 EXPORT_SYMBOL(__copy_to_user_ll);
19605 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
19606 unsigned long n)
19607 {
19608 if (movsl_is_ok(to, from, n))
19609 - __copy_user(to, from, n);
19610 + __copy_user(to, from, n, __copyuser_seg, "", "");
19611 else
19612 - n = __copy_user_intel((void __user *)to,
19613 - (const void *)from, n);
19614 + n = __generic_copy_from_user_intel(to, from, n);
19615 return n;
19616 }
19617 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
19618 @@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
19619 if (n > 64 && cpu_has_xmm2)
19620 n = __copy_user_intel_nocache(to, from, n);
19621 else
19622 - __copy_user(to, from, n);
19623 + __copy_user(to, from, n, __copyuser_seg, "", "");
19624 #else
19625 - __copy_user(to, from, n);
19626 + __copy_user(to, from, n, __copyuser_seg, "", "");
19627 #endif
19628 return n;
19629 }
19630 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
19631
19632 -/**
19633 - * copy_to_user: - Copy a block of data into user space.
19634 - * @to: Destination address, in user space.
19635 - * @from: Source address, in kernel space.
19636 - * @n: Number of bytes to copy.
19637 - *
19638 - * Context: User context only. This function may sleep.
19639 - *
19640 - * Copy data from kernel space to user space.
19641 - *
19642 - * Returns number of bytes that could not be copied.
19643 - * On success, this will be zero.
19644 - */
19645 -unsigned long
19646 -copy_to_user(void __user *to, const void *from, unsigned long n)
19647 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19648 +void __set_fs(mm_segment_t x)
19649 {
19650 - if (access_ok(VERIFY_WRITE, to, n))
19651 - n = __copy_to_user(to, from, n);
19652 - return n;
19653 + switch (x.seg) {
19654 + case 0:
19655 + loadsegment(gs, 0);
19656 + break;
19657 + case TASK_SIZE_MAX:
19658 + loadsegment(gs, __USER_DS);
19659 + break;
19660 + case -1UL:
19661 + loadsegment(gs, __KERNEL_DS);
19662 + break;
19663 + default:
19664 + BUG();
19665 + }
19666 + return;
19667 }
19668 -EXPORT_SYMBOL(copy_to_user);
19669 +EXPORT_SYMBOL(__set_fs);
19670
19671 -/**
19672 - * copy_from_user: - Copy a block of data from user space.
19673 - * @to: Destination address, in kernel space.
19674 - * @from: Source address, in user space.
19675 - * @n: Number of bytes to copy.
19676 - *
19677 - * Context: User context only. This function may sleep.
19678 - *
19679 - * Copy data from user space to kernel space.
19680 - *
19681 - * Returns number of bytes that could not be copied.
19682 - * On success, this will be zero.
19683 - *
19684 - * If some data could not be copied, this function will pad the copied
19685 - * data to the requested size using zero bytes.
19686 - */
19687 -unsigned long
19688 -copy_from_user(void *to, const void __user *from, unsigned long n)
19689 +void set_fs(mm_segment_t x)
19690 {
19691 - if (access_ok(VERIFY_READ, from, n))
19692 - n = __copy_from_user(to, from, n);
19693 - else
19694 - memset(to, 0, n);
19695 - return n;
19696 + current_thread_info()->addr_limit = x;
19697 + __set_fs(x);
19698 }
19699 -EXPORT_SYMBOL(copy_from_user);
19700 +EXPORT_SYMBOL(set_fs);
19701 +#endif
19702 diff -urNp linux-2.6.32.42/arch/x86/lib/usercopy_64.c linux-2.6.32.42/arch/x86/lib/usercopy_64.c
19703 --- linux-2.6.32.42/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
19704 +++ linux-2.6.32.42/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
19705 @@ -42,6 +42,12 @@ long
19706 __strncpy_from_user(char *dst, const char __user *src, long count)
19707 {
19708 long res;
19709 +
19710 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19711 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19712 + src += PAX_USER_SHADOW_BASE;
19713 +#endif
19714 +
19715 __do_strncpy_from_user(dst, src, count, res);
19716 return res;
19717 }
19718 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
19719 {
19720 long __d0;
19721 might_fault();
19722 +
19723 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19724 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
19725 + addr += PAX_USER_SHADOW_BASE;
19726 +#endif
19727 +
19728 /* no memory constraint because it doesn't change any memory gcc knows
19729 about */
19730 asm volatile(
19731 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
19732
19733 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
19734 {
19735 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19736 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
19737 +
19738 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19739 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
19740 + to += PAX_USER_SHADOW_BASE;
19741 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
19742 + from += PAX_USER_SHADOW_BASE;
19743 +#endif
19744 +
19745 return copy_user_generic((__force void *)to, (__force void *)from, len);
19746 - }
19747 - return len;
19748 + }
19749 + return len;
19750 }
19751 EXPORT_SYMBOL(copy_in_user);
19752
19753 diff -urNp linux-2.6.32.42/arch/x86/Makefile linux-2.6.32.42/arch/x86/Makefile
19754 --- linux-2.6.32.42/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
19755 +++ linux-2.6.32.42/arch/x86/Makefile 2011-04-17 15:56:46.000000000 -0400
19756 @@ -189,3 +189,12 @@ define archhelp
19757 echo ' FDARGS="..." arguments for the booted kernel'
19758 echo ' FDINITRD=file initrd for the booted kernel'
19759 endef
19760 +
19761 +define OLD_LD
19762 +
19763 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
19764 +*** Please upgrade your binutils to 2.18 or newer
19765 +endef
19766 +
19767 +archprepare:
19768 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
19769 diff -urNp linux-2.6.32.42/arch/x86/mm/extable.c linux-2.6.32.42/arch/x86/mm/extable.c
19770 --- linux-2.6.32.42/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
19771 +++ linux-2.6.32.42/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
19772 @@ -1,14 +1,71 @@
19773 #include <linux/module.h>
19774 #include <linux/spinlock.h>
19775 +#include <linux/sort.h>
19776 #include <asm/uaccess.h>
19777 +#include <asm/pgtable.h>
19778
19779 +/*
19780 + * The exception table needs to be sorted so that the binary
19781 + * search that we use to find entries in it works properly.
19782 + * This is used both for the kernel exception table and for
19783 + * the exception tables of modules that get loaded.
19784 + */
19785 +static int cmp_ex(const void *a, const void *b)
19786 +{
19787 + const struct exception_table_entry *x = a, *y = b;
19788 +
19789 + /* avoid overflow */
19790 + if (x->insn > y->insn)
19791 + return 1;
19792 + if (x->insn < y->insn)
19793 + return -1;
19794 + return 0;
19795 +}
19796 +
19797 +static void swap_ex(void *a, void *b, int size)
19798 +{
19799 + struct exception_table_entry t, *x = a, *y = b;
19800 +
19801 + t = *x;
19802 +
19803 + pax_open_kernel();
19804 + *x = *y;
19805 + *y = t;
19806 + pax_close_kernel();
19807 +}
19808 +
19809 +void sort_extable(struct exception_table_entry *start,
19810 + struct exception_table_entry *finish)
19811 +{
19812 + sort(start, finish - start, sizeof(struct exception_table_entry),
19813 + cmp_ex, swap_ex);
19814 +}
19815 +
19816 +#ifdef CONFIG_MODULES
19817 +/*
19818 + * If the exception table is sorted, any referring to the module init
19819 + * will be at the beginning or the end.
19820 + */
19821 +void trim_init_extable(struct module *m)
19822 +{
19823 + /*trim the beginning*/
19824 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
19825 + m->extable++;
19826 + m->num_exentries--;
19827 + }
19828 + /*trim the end*/
19829 + while (m->num_exentries &&
19830 + within_module_init(m->extable[m->num_exentries-1].insn, m))
19831 + m->num_exentries--;
19832 +}
19833 +#endif /* CONFIG_MODULES */
19834
19835 int fixup_exception(struct pt_regs *regs)
19836 {
19837 const struct exception_table_entry *fixup;
19838
19839 #ifdef CONFIG_PNPBIOS
19840 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
19841 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
19842 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
19843 extern u32 pnp_bios_is_utter_crap;
19844 pnp_bios_is_utter_crap = 1;
19845 diff -urNp linux-2.6.32.42/arch/x86/mm/fault.c linux-2.6.32.42/arch/x86/mm/fault.c
19846 --- linux-2.6.32.42/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
19847 +++ linux-2.6.32.42/arch/x86/mm/fault.c 2011-06-06 17:35:16.000000000 -0400
19848 @@ -11,10 +11,19 @@
19849 #include <linux/kprobes.h> /* __kprobes, ... */
19850 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
19851 #include <linux/perf_event.h> /* perf_sw_event */
19852 +#include <linux/unistd.h>
19853 +#include <linux/compiler.h>
19854
19855 #include <asm/traps.h> /* dotraplinkage, ... */
19856 #include <asm/pgalloc.h> /* pgd_*(), ... */
19857 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
19858 +#include <asm/vsyscall.h>
19859 +#include <asm/tlbflush.h>
19860 +
19861 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19862 +#include <asm/stacktrace.h>
19863 +#include "../kernel/dumpstack.h"
19864 +#endif
19865
19866 /*
19867 * Page fault error code bits:
19868 @@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
19869 int ret = 0;
19870
19871 /* kprobe_running() needs smp_processor_id() */
19872 - if (kprobes_built_in() && !user_mode_vm(regs)) {
19873 + if (kprobes_built_in() && !user_mode(regs)) {
19874 preempt_disable();
19875 if (kprobe_running() && kprobe_fault_handler(regs, 14))
19876 ret = 1;
19877 @@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
19878 return !instr_lo || (instr_lo>>1) == 1;
19879 case 0x00:
19880 /* Prefetch instruction is 0x0F0D or 0x0F18 */
19881 - if (probe_kernel_address(instr, opcode))
19882 + if (user_mode(regs)) {
19883 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
19884 + return 0;
19885 + } else if (probe_kernel_address(instr, opcode))
19886 return 0;
19887
19888 *prefetch = (instr_lo == 0xF) &&
19889 @@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
19890 while (instr < max_instr) {
19891 unsigned char opcode;
19892
19893 - if (probe_kernel_address(instr, opcode))
19894 + if (user_mode(regs)) {
19895 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
19896 + break;
19897 + } else if (probe_kernel_address(instr, opcode))
19898 break;
19899
19900 instr++;
19901 @@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
19902 force_sig_info(si_signo, &info, tsk);
19903 }
19904
19905 +#ifdef CONFIG_PAX_EMUTRAMP
19906 +static int pax_handle_fetch_fault(struct pt_regs *regs);
19907 +#endif
19908 +
19909 +#ifdef CONFIG_PAX_PAGEEXEC
19910 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
19911 +{
19912 + pgd_t *pgd;
19913 + pud_t *pud;
19914 + pmd_t *pmd;
19915 +
19916 + pgd = pgd_offset(mm, address);
19917 + if (!pgd_present(*pgd))
19918 + return NULL;
19919 + pud = pud_offset(pgd, address);
19920 + if (!pud_present(*pud))
19921 + return NULL;
19922 + pmd = pmd_offset(pud, address);
19923 + if (!pmd_present(*pmd))
19924 + return NULL;
19925 + return pmd;
19926 +}
19927 +#endif
19928 +
19929 DEFINE_SPINLOCK(pgd_lock);
19930 LIST_HEAD(pgd_list);
19931
19932 @@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
19933 address += PMD_SIZE) {
19934
19935 unsigned long flags;
19936 +
19937 +#ifdef CONFIG_PAX_PER_CPU_PGD
19938 + unsigned long cpu;
19939 +#else
19940 struct page *page;
19941 +#endif
19942
19943 spin_lock_irqsave(&pgd_lock, flags);
19944 +
19945 +#ifdef CONFIG_PAX_PER_CPU_PGD
19946 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19947 + pgd_t *pgd = get_cpu_pgd(cpu);
19948 +#else
19949 list_for_each_entry(page, &pgd_list, lru) {
19950 - if (!vmalloc_sync_one(page_address(page), address))
19951 + pgd_t *pgd = page_address(page);
19952 +#endif
19953 +
19954 + if (!vmalloc_sync_one(pgd, address))
19955 break;
19956 }
19957 spin_unlock_irqrestore(&pgd_lock, flags);
19958 @@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
19959 * an interrupt in the middle of a task switch..
19960 */
19961 pgd_paddr = read_cr3();
19962 +
19963 +#ifdef CONFIG_PAX_PER_CPU_PGD
19964 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
19965 +#endif
19966 +
19967 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
19968 if (!pmd_k)
19969 return -1;
19970 @@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
19971
19972 const pgd_t *pgd_ref = pgd_offset_k(address);
19973 unsigned long flags;
19974 +
19975 +#ifdef CONFIG_PAX_PER_CPU_PGD
19976 + unsigned long cpu;
19977 +#else
19978 struct page *page;
19979 +#endif
19980
19981 if (pgd_none(*pgd_ref))
19982 continue;
19983
19984 spin_lock_irqsave(&pgd_lock, flags);
19985 +
19986 +#ifdef CONFIG_PAX_PER_CPU_PGD
19987 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19988 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
19989 +#else
19990 list_for_each_entry(page, &pgd_list, lru) {
19991 pgd_t *pgd;
19992 pgd = (pgd_t *)page_address(page) + pgd_index(address);
19993 +#endif
19994 +
19995 if (pgd_none(*pgd))
19996 set_pgd(pgd, *pgd_ref);
19997 else
19998 @@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
19999 * happen within a race in page table update. In the later
20000 * case just flush:
20001 */
20002 +
20003 +#ifdef CONFIG_PAX_PER_CPU_PGD
20004 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
20005 + pgd = pgd_offset_cpu(smp_processor_id(), address);
20006 +#else
20007 pgd = pgd_offset(current->active_mm, address);
20008 +#endif
20009 +
20010 pgd_ref = pgd_offset_k(address);
20011 if (pgd_none(*pgd_ref))
20012 return -1;
20013 @@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
20014 static int is_errata100(struct pt_regs *regs, unsigned long address)
20015 {
20016 #ifdef CONFIG_X86_64
20017 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
20018 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
20019 return 1;
20020 #endif
20021 return 0;
20022 @@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
20023 }
20024
20025 static const char nx_warning[] = KERN_CRIT
20026 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20027 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20028
20029 static void
20030 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20031 @@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
20032 if (!oops_may_print())
20033 return;
20034
20035 - if (error_code & PF_INSTR) {
20036 + if (nx_enabled && (error_code & PF_INSTR)) {
20037 unsigned int level;
20038
20039 pte_t *pte = lookup_address(address, &level);
20040
20041 if (pte && pte_present(*pte) && !pte_exec(*pte))
20042 - printk(nx_warning, current_uid());
20043 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20044 }
20045
20046 +#ifdef CONFIG_PAX_KERNEXEC
20047 + if (init_mm.start_code <= address && address < init_mm.end_code) {
20048 + if (current->signal->curr_ip)
20049 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20050 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20051 + else
20052 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20053 + current->comm, task_pid_nr(current), current_uid(), current_euid());
20054 + }
20055 +#endif
20056 +
20057 printk(KERN_ALERT "BUG: unable to handle kernel ");
20058 if (address < PAGE_SIZE)
20059 printk(KERN_CONT "NULL pointer dereference");
20060 @@ -704,6 +791,68 @@ __bad_area_nosemaphore(struct pt_regs *r
20061 unsigned long address, int si_code)
20062 {
20063 struct task_struct *tsk = current;
20064 + struct mm_struct *mm = tsk->mm;
20065 +
20066 +#ifdef CONFIG_X86_64
20067 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
20068 + if (regs->ip == (unsigned long)vgettimeofday) {
20069 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
20070 + return;
20071 + } else if (regs->ip == (unsigned long)vtime) {
20072 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
20073 + return;
20074 + } else if (regs->ip == (unsigned long)vgetcpu) {
20075 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
20076 + return;
20077 + }
20078 + }
20079 +#endif
20080 +
20081 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20082 + if (mm && (error_code & PF_USER)) {
20083 + unsigned long ip = regs->ip;
20084 +
20085 + if (v8086_mode(regs))
20086 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20087 +
20088 + /*
20089 + * It's possible to have interrupts off here:
20090 + */
20091 + local_irq_enable();
20092 +
20093 +#ifdef CONFIG_PAX_PAGEEXEC
20094 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
20095 + ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
20096 +
20097 +#ifdef CONFIG_PAX_EMUTRAMP
20098 + switch (pax_handle_fetch_fault(regs)) {
20099 + case 2:
20100 + return;
20101 + }
20102 +#endif
20103 +
20104 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20105 + do_group_exit(SIGKILL);
20106 + }
20107 +#endif
20108 +
20109 +#ifdef CONFIG_PAX_SEGMEXEC
20110 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
20111 +
20112 +#ifdef CONFIG_PAX_EMUTRAMP
20113 + switch (pax_handle_fetch_fault(regs)) {
20114 + case 2:
20115 + return;
20116 + }
20117 +#endif
20118 +
20119 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20120 + do_group_exit(SIGKILL);
20121 + }
20122 +#endif
20123 +
20124 + }
20125 +#endif
20126
20127 /* User mode accesses just cause a SIGSEGV */
20128 if (error_code & PF_USER) {
20129 @@ -857,6 +1006,99 @@ static int spurious_fault_check(unsigned
20130 return 1;
20131 }
20132
20133 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20134 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20135 +{
20136 + pte_t *pte;
20137 + pmd_t *pmd;
20138 + spinlock_t *ptl;
20139 + unsigned char pte_mask;
20140 +
20141 + if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20142 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
20143 + return 0;
20144 +
20145 + /* PaX: it's our fault, let's handle it if we can */
20146 +
20147 + /* PaX: take a look at read faults before acquiring any locks */
20148 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20149 + /* instruction fetch attempt from a protected page in user mode */
20150 + up_read(&mm->mmap_sem);
20151 +
20152 +#ifdef CONFIG_PAX_EMUTRAMP
20153 + switch (pax_handle_fetch_fault(regs)) {
20154 + case 2:
20155 + return 1;
20156 + }
20157 +#endif
20158 +
20159 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20160 + do_group_exit(SIGKILL);
20161 + }
20162 +
20163 + pmd = pax_get_pmd(mm, address);
20164 + if (unlikely(!pmd))
20165 + return 0;
20166 +
20167 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20168 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20169 + pte_unmap_unlock(pte, ptl);
20170 + return 0;
20171 + }
20172 +
20173 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20174 + /* write attempt to a protected page in user mode */
20175 + pte_unmap_unlock(pte, ptl);
20176 + return 0;
20177 + }
20178 +
20179 +#ifdef CONFIG_SMP
20180 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20181 +#else
20182 + if (likely(address > get_limit(regs->cs)))
20183 +#endif
20184 + {
20185 + set_pte(pte, pte_mkread(*pte));
20186 + __flush_tlb_one(address);
20187 + pte_unmap_unlock(pte, ptl);
20188 + up_read(&mm->mmap_sem);
20189 + return 1;
20190 + }
20191 +
20192 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20193 +
20194 + /*
20195 + * PaX: fill DTLB with user rights and retry
20196 + */
20197 + __asm__ __volatile__ (
20198 + "orb %2,(%1)\n"
20199 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20200 +/*
20201 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20202 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20203 + * page fault when examined during a TLB load attempt. this is true not only
20204 + * for PTEs holding a non-present entry but also present entries that will
20205 + * raise a page fault (such as those set up by PaX, or the copy-on-write
20206 + * mechanism). in effect it means that we do *not* need to flush the TLBs
20207 + * for our target pages since their PTEs are simply not in the TLBs at all.
20208 +
20209 + * the best thing in omitting it is that we gain around 15-20% speed in the
20210 + * fast path of the page fault handler and can get rid of tracing since we
20211 + * can no longer flush unintended entries.
20212 + */
20213 + "invlpg (%0)\n"
20214 +#endif
20215 + __copyuser_seg"testb $0,(%0)\n"
20216 + "xorb %3,(%1)\n"
20217 + :
20218 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
20219 + : "memory", "cc");
20220 + pte_unmap_unlock(pte, ptl);
20221 + up_read(&mm->mmap_sem);
20222 + return 1;
20223 +}
20224 +#endif
20225 +
20226 /*
20227 * Handle a spurious fault caused by a stale TLB entry.
20228 *
20229 @@ -923,6 +1165,9 @@ int show_unhandled_signals = 1;
20230 static inline int
20231 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
20232 {
20233 + if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
20234 + return 1;
20235 +
20236 if (write) {
20237 /* write, present and write, not present: */
20238 if (unlikely(!(vma->vm_flags & VM_WRITE)))
20239 @@ -956,17 +1201,31 @@ do_page_fault(struct pt_regs *regs, unsi
20240 {
20241 struct vm_area_struct *vma;
20242 struct task_struct *tsk;
20243 - unsigned long address;
20244 struct mm_struct *mm;
20245 int write;
20246 int fault;
20247
20248 + /* Get the faulting address: */
20249 + unsigned long address = read_cr2();
20250 +
20251 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20252 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
20253 + if (!search_exception_tables(regs->ip)) {
20254 + bad_area_nosemaphore(regs, error_code, address);
20255 + return;
20256 + }
20257 + if (address < PAX_USER_SHADOW_BASE) {
20258 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
20259 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
20260 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
20261 + } else
20262 + address -= PAX_USER_SHADOW_BASE;
20263 + }
20264 +#endif
20265 +
20266 tsk = current;
20267 mm = tsk->mm;
20268
20269 - /* Get the faulting address: */
20270 - address = read_cr2();
20271 -
20272 /*
20273 * Detect and handle instructions that would cause a page fault for
20274 * both a tracked kernel page and a userspace page.
20275 @@ -1026,7 +1285,7 @@ do_page_fault(struct pt_regs *regs, unsi
20276 * User-mode registers count as a user access even for any
20277 * potential system fault or CPU buglet:
20278 */
20279 - if (user_mode_vm(regs)) {
20280 + if (user_mode(regs)) {
20281 local_irq_enable();
20282 error_code |= PF_USER;
20283 } else {
20284 @@ -1080,6 +1339,11 @@ do_page_fault(struct pt_regs *regs, unsi
20285 might_sleep();
20286 }
20287
20288 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20289 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
20290 + return;
20291 +#endif
20292 +
20293 vma = find_vma(mm, address);
20294 if (unlikely(!vma)) {
20295 bad_area(regs, error_code, address);
20296 @@ -1091,18 +1355,24 @@ do_page_fault(struct pt_regs *regs, unsi
20297 bad_area(regs, error_code, address);
20298 return;
20299 }
20300 - if (error_code & PF_USER) {
20301 - /*
20302 - * Accessing the stack below %sp is always a bug.
20303 - * The large cushion allows instructions like enter
20304 - * and pusha to work. ("enter $65535, $31" pushes
20305 - * 32 pointers and then decrements %sp by 65535.)
20306 - */
20307 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20308 - bad_area(regs, error_code, address);
20309 - return;
20310 - }
20311 + /*
20312 + * Accessing the stack below %sp is always a bug.
20313 + * The large cushion allows instructions like enter
20314 + * and pusha to work. ("enter $65535, $31" pushes
20315 + * 32 pointers and then decrements %sp by 65535.)
20316 + */
20317 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20318 + bad_area(regs, error_code, address);
20319 + return;
20320 + }
20321 +
20322 +#ifdef CONFIG_PAX_SEGMEXEC
20323 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
20324 + bad_area(regs, error_code, address);
20325 + return;
20326 }
20327 +#endif
20328 +
20329 if (unlikely(expand_stack(vma, address))) {
20330 bad_area(regs, error_code, address);
20331 return;
20332 @@ -1146,3 +1416,199 @@ good_area:
20333
20334 up_read(&mm->mmap_sem);
20335 }
20336 +
20337 +#ifdef CONFIG_PAX_EMUTRAMP
20338 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
20339 +{
20340 + int err;
20341 +
20342 + do { /* PaX: gcc trampoline emulation #1 */
20343 + unsigned char mov1, mov2;
20344 + unsigned short jmp;
20345 + unsigned int addr1, addr2;
20346 +
20347 +#ifdef CONFIG_X86_64
20348 + if ((regs->ip + 11) >> 32)
20349 + break;
20350 +#endif
20351 +
20352 + err = get_user(mov1, (unsigned char __user *)regs->ip);
20353 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20354 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
20355 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20356 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
20357 +
20358 + if (err)
20359 + break;
20360 +
20361 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
20362 + regs->cx = addr1;
20363 + regs->ax = addr2;
20364 + regs->ip = addr2;
20365 + return 2;
20366 + }
20367 + } while (0);
20368 +
20369 + do { /* PaX: gcc trampoline emulation #2 */
20370 + unsigned char mov, jmp;
20371 + unsigned int addr1, addr2;
20372 +
20373 +#ifdef CONFIG_X86_64
20374 + if ((regs->ip + 9) >> 32)
20375 + break;
20376 +#endif
20377 +
20378 + err = get_user(mov, (unsigned char __user *)regs->ip);
20379 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20380 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
20381 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20382 +
20383 + if (err)
20384 + break;
20385 +
20386 + if (mov == 0xB9 && jmp == 0xE9) {
20387 + regs->cx = addr1;
20388 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
20389 + return 2;
20390 + }
20391 + } while (0);
20392 +
20393 + return 1; /* PaX in action */
20394 +}
20395 +
20396 +#ifdef CONFIG_X86_64
20397 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
20398 +{
20399 + int err;
20400 +
20401 + do { /* PaX: gcc trampoline emulation #1 */
20402 + unsigned short mov1, mov2, jmp1;
20403 + unsigned char jmp2;
20404 + unsigned int addr1;
20405 + unsigned long addr2;
20406 +
20407 + err = get_user(mov1, (unsigned short __user *)regs->ip);
20408 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
20409 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
20410 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
20411 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
20412 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
20413 +
20414 + if (err)
20415 + break;
20416 +
20417 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20418 + regs->r11 = addr1;
20419 + regs->r10 = addr2;
20420 + regs->ip = addr1;
20421 + return 2;
20422 + }
20423 + } while (0);
20424 +
20425 + do { /* PaX: gcc trampoline emulation #2 */
20426 + unsigned short mov1, mov2, jmp1;
20427 + unsigned char jmp2;
20428 + unsigned long addr1, addr2;
20429 +
20430 + err = get_user(mov1, (unsigned short __user *)regs->ip);
20431 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
20432 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
20433 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
20434 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
20435 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
20436 +
20437 + if (err)
20438 + break;
20439 +
20440 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
20441 + regs->r11 = addr1;
20442 + regs->r10 = addr2;
20443 + regs->ip = addr1;
20444 + return 2;
20445 + }
20446 + } while (0);
20447 +
20448 + return 1; /* PaX in action */
20449 +}
20450 +#endif
20451 +
20452 +/*
20453 + * PaX: decide what to do with offenders (regs->ip = fault address)
20454 + *
20455 + * returns 1 when task should be killed
20456 + * 2 when gcc trampoline was detected
20457 + */
20458 +static int pax_handle_fetch_fault(struct pt_regs *regs)
20459 +{
20460 + if (v8086_mode(regs))
20461 + return 1;
20462 +
20463 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
20464 + return 1;
20465 +
20466 +#ifdef CONFIG_X86_32
20467 + return pax_handle_fetch_fault_32(regs);
20468 +#else
20469 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
20470 + return pax_handle_fetch_fault_32(regs);
20471 + else
20472 + return pax_handle_fetch_fault_64(regs);
20473 +#endif
20474 +}
20475 +#endif
20476 +
20477 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20478 +void pax_report_insns(void *pc, void *sp)
20479 +{
20480 + long i;
20481 +
20482 + printk(KERN_ERR "PAX: bytes at PC: ");
20483 + for (i = 0; i < 20; i++) {
20484 + unsigned char c;
20485 + if (get_user(c, (__force unsigned char __user *)pc+i))
20486 + printk(KERN_CONT "?? ");
20487 + else
20488 + printk(KERN_CONT "%02x ", c);
20489 + }
20490 + printk("\n");
20491 +
20492 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
20493 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
20494 + unsigned long c;
20495 + if (get_user(c, (__force unsigned long __user *)sp+i))
20496 +#ifdef CONFIG_X86_32
20497 + printk(KERN_CONT "???????? ");
20498 +#else
20499 + printk(KERN_CONT "???????????????? ");
20500 +#endif
20501 + else
20502 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
20503 + }
20504 + printk("\n");
20505 +}
20506 +#endif
20507 +
20508 +/**
20509 + * probe_kernel_write(): safely attempt to write to a location
20510 + * @dst: address to write to
20511 + * @src: pointer to the data that shall be written
20512 + * @size: size of the data chunk
20513 + *
20514 + * Safely write to address @dst from the buffer at @src. If a kernel fault
20515 + * happens, handle that and return -EFAULT.
20516 + */
20517 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
20518 +{
20519 + long ret;
20520 + mm_segment_t old_fs = get_fs();
20521 +
20522 + set_fs(KERNEL_DS);
20523 + pagefault_disable();
20524 + pax_open_kernel();
20525 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
20526 + pax_close_kernel();
20527 + pagefault_enable();
20528 + set_fs(old_fs);
20529 +
20530 + return ret ? -EFAULT : 0;
20531 +}
20532 diff -urNp linux-2.6.32.42/arch/x86/mm/gup.c linux-2.6.32.42/arch/x86/mm/gup.c
20533 --- linux-2.6.32.42/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
20534 +++ linux-2.6.32.42/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
20535 @@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
20536 addr = start;
20537 len = (unsigned long) nr_pages << PAGE_SHIFT;
20538 end = start + len;
20539 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20540 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
20541 (void __user *)start, len)))
20542 return 0;
20543
20544 diff -urNp linux-2.6.32.42/arch/x86/mm/highmem_32.c linux-2.6.32.42/arch/x86/mm/highmem_32.c
20545 --- linux-2.6.32.42/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
20546 +++ linux-2.6.32.42/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
20547 @@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
20548 idx = type + KM_TYPE_NR*smp_processor_id();
20549 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20550 BUG_ON(!pte_none(*(kmap_pte-idx)));
20551 +
20552 + pax_open_kernel();
20553 set_pte(kmap_pte-idx, mk_pte(page, prot));
20554 + pax_close_kernel();
20555
20556 return (void *)vaddr;
20557 }
20558 diff -urNp linux-2.6.32.42/arch/x86/mm/hugetlbpage.c linux-2.6.32.42/arch/x86/mm/hugetlbpage.c
20559 --- linux-2.6.32.42/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
20560 +++ linux-2.6.32.42/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
20561 @@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
20562 struct hstate *h = hstate_file(file);
20563 struct mm_struct *mm = current->mm;
20564 struct vm_area_struct *vma;
20565 - unsigned long start_addr;
20566 + unsigned long start_addr, pax_task_size = TASK_SIZE;
20567 +
20568 +#ifdef CONFIG_PAX_SEGMEXEC
20569 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20570 + pax_task_size = SEGMEXEC_TASK_SIZE;
20571 +#endif
20572 +
20573 + pax_task_size -= PAGE_SIZE;
20574
20575 if (len > mm->cached_hole_size) {
20576 - start_addr = mm->free_area_cache;
20577 + start_addr = mm->free_area_cache;
20578 } else {
20579 - start_addr = TASK_UNMAPPED_BASE;
20580 - mm->cached_hole_size = 0;
20581 + start_addr = mm->mmap_base;
20582 + mm->cached_hole_size = 0;
20583 }
20584
20585 full_search:
20586 @@ -281,26 +288,27 @@ full_search:
20587
20588 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
20589 /* At this point: (!vma || addr < vma->vm_end). */
20590 - if (TASK_SIZE - len < addr) {
20591 + if (pax_task_size - len < addr) {
20592 /*
20593 * Start a new search - just in case we missed
20594 * some holes.
20595 */
20596 - if (start_addr != TASK_UNMAPPED_BASE) {
20597 - start_addr = TASK_UNMAPPED_BASE;
20598 + if (start_addr != mm->mmap_base) {
20599 + start_addr = mm->mmap_base;
20600 mm->cached_hole_size = 0;
20601 goto full_search;
20602 }
20603 return -ENOMEM;
20604 }
20605 - if (!vma || addr + len <= vma->vm_start) {
20606 - mm->free_area_cache = addr + len;
20607 - return addr;
20608 - }
20609 + if (check_heap_stack_gap(vma, addr, len))
20610 + break;
20611 if (addr + mm->cached_hole_size < vma->vm_start)
20612 mm->cached_hole_size = vma->vm_start - addr;
20613 addr = ALIGN(vma->vm_end, huge_page_size(h));
20614 }
20615 +
20616 + mm->free_area_cache = addr + len;
20617 + return addr;
20618 }
20619
20620 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
20621 @@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
20622 {
20623 struct hstate *h = hstate_file(file);
20624 struct mm_struct *mm = current->mm;
20625 - struct vm_area_struct *vma, *prev_vma;
20626 - unsigned long base = mm->mmap_base, addr = addr0;
20627 + struct vm_area_struct *vma;
20628 + unsigned long base = mm->mmap_base, addr;
20629 unsigned long largest_hole = mm->cached_hole_size;
20630 - int first_time = 1;
20631
20632 /* don't allow allocations above current base */
20633 if (mm->free_area_cache > base)
20634 @@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
20635 largest_hole = 0;
20636 mm->free_area_cache = base;
20637 }
20638 -try_again:
20639 +
20640 /* make sure it can fit in the remaining address space */
20641 if (mm->free_area_cache < len)
20642 goto fail;
20643
20644 /* either no address requested or cant fit in requested address hole */
20645 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
20646 + addr = (mm->free_area_cache - len);
20647 do {
20648 + addr &= huge_page_mask(h);
20649 + vma = find_vma(mm, addr);
20650 /*
20651 * Lookup failure means no vma is above this address,
20652 * i.e. return with success:
20653 - */
20654 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
20655 - return addr;
20656 -
20657 - /*
20658 * new region fits between prev_vma->vm_end and
20659 * vma->vm_start, use it:
20660 */
20661 - if (addr + len <= vma->vm_start &&
20662 - (!prev_vma || (addr >= prev_vma->vm_end))) {
20663 + if (check_heap_stack_gap(vma, addr, len)) {
20664 /* remember the address as a hint for next time */
20665 - mm->cached_hole_size = largest_hole;
20666 - return (mm->free_area_cache = addr);
20667 - } else {
20668 - /* pull free_area_cache down to the first hole */
20669 - if (mm->free_area_cache == vma->vm_end) {
20670 - mm->free_area_cache = vma->vm_start;
20671 - mm->cached_hole_size = largest_hole;
20672 - }
20673 + mm->cached_hole_size = largest_hole;
20674 + return (mm->free_area_cache = addr);
20675 + }
20676 + /* pull free_area_cache down to the first hole */
20677 + if (mm->free_area_cache == vma->vm_end) {
20678 + mm->free_area_cache = vma->vm_start;
20679 + mm->cached_hole_size = largest_hole;
20680 }
20681
20682 /* remember the largest hole we saw so far */
20683 if (addr + largest_hole < vma->vm_start)
20684 - largest_hole = vma->vm_start - addr;
20685 + largest_hole = vma->vm_start - addr;
20686
20687 /* try just below the current vma->vm_start */
20688 - addr = (vma->vm_start - len) & huge_page_mask(h);
20689 - } while (len <= vma->vm_start);
20690 + addr = skip_heap_stack_gap(vma, len);
20691 + } while (!IS_ERR_VALUE(addr));
20692
20693 fail:
20694 /*
20695 - * if hint left us with no space for the requested
20696 - * mapping then try again:
20697 - */
20698 - if (first_time) {
20699 - mm->free_area_cache = base;
20700 - largest_hole = 0;
20701 - first_time = 0;
20702 - goto try_again;
20703 - }
20704 - /*
20705 * A failed mmap() very likely causes application failure,
20706 * so fall back to the bottom-up function here. This scenario
20707 * can happen with large stack limits and large mmap()
20708 * allocations.
20709 */
20710 - mm->free_area_cache = TASK_UNMAPPED_BASE;
20711 +
20712 +#ifdef CONFIG_PAX_SEGMEXEC
20713 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20714 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
20715 + else
20716 +#endif
20717 +
20718 + mm->mmap_base = TASK_UNMAPPED_BASE;
20719 +
20720 +#ifdef CONFIG_PAX_RANDMMAP
20721 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20722 + mm->mmap_base += mm->delta_mmap;
20723 +#endif
20724 +
20725 + mm->free_area_cache = mm->mmap_base;
20726 mm->cached_hole_size = ~0UL;
20727 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
20728 len, pgoff, flags);
20729 @@ -387,6 +393,7 @@ fail:
20730 /*
20731 * Restore the topdown base:
20732 */
20733 + mm->mmap_base = base;
20734 mm->free_area_cache = base;
20735 mm->cached_hole_size = ~0UL;
20736
20737 @@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
20738 struct hstate *h = hstate_file(file);
20739 struct mm_struct *mm = current->mm;
20740 struct vm_area_struct *vma;
20741 + unsigned long pax_task_size = TASK_SIZE;
20742
20743 if (len & ~huge_page_mask(h))
20744 return -EINVAL;
20745 - if (len > TASK_SIZE)
20746 +
20747 +#ifdef CONFIG_PAX_SEGMEXEC
20748 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20749 + pax_task_size = SEGMEXEC_TASK_SIZE;
20750 +#endif
20751 +
20752 + pax_task_size -= PAGE_SIZE;
20753 +
20754 + if (len > pax_task_size)
20755 return -ENOMEM;
20756
20757 if (flags & MAP_FIXED) {
20758 @@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
20759 if (addr) {
20760 addr = ALIGN(addr, huge_page_size(h));
20761 vma = find_vma(mm, addr);
20762 - if (TASK_SIZE - len >= addr &&
20763 - (!vma || addr + len <= vma->vm_start))
20764 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
20765 return addr;
20766 }
20767 if (mm->get_unmapped_area == arch_get_unmapped_area)
20768 diff -urNp linux-2.6.32.42/arch/x86/mm/init_32.c linux-2.6.32.42/arch/x86/mm/init_32.c
20769 --- linux-2.6.32.42/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
20770 +++ linux-2.6.32.42/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
20771 @@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
20772 }
20773
20774 /*
20775 - * Creates a middle page table and puts a pointer to it in the
20776 - * given global directory entry. This only returns the gd entry
20777 - * in non-PAE compilation mode, since the middle layer is folded.
20778 - */
20779 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
20780 -{
20781 - pud_t *pud;
20782 - pmd_t *pmd_table;
20783 -
20784 -#ifdef CONFIG_X86_PAE
20785 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
20786 - if (after_bootmem)
20787 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
20788 - else
20789 - pmd_table = (pmd_t *)alloc_low_page();
20790 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
20791 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
20792 - pud = pud_offset(pgd, 0);
20793 - BUG_ON(pmd_table != pmd_offset(pud, 0));
20794 -
20795 - return pmd_table;
20796 - }
20797 -#endif
20798 - pud = pud_offset(pgd, 0);
20799 - pmd_table = pmd_offset(pud, 0);
20800 -
20801 - return pmd_table;
20802 -}
20803 -
20804 -/*
20805 * Create a page table and place a pointer to it in a middle page
20806 * directory entry:
20807 */
20808 @@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
20809 page_table = (pte_t *)alloc_low_page();
20810
20811 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
20812 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20813 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
20814 +#else
20815 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
20816 +#endif
20817 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
20818 }
20819
20820 return pte_offset_kernel(pmd, 0);
20821 }
20822
20823 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
20824 +{
20825 + pud_t *pud;
20826 + pmd_t *pmd_table;
20827 +
20828 + pud = pud_offset(pgd, 0);
20829 + pmd_table = pmd_offset(pud, 0);
20830 +
20831 + return pmd_table;
20832 +}
20833 +
20834 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
20835 {
20836 int pgd_idx = pgd_index(vaddr);
20837 @@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
20838 int pgd_idx, pmd_idx;
20839 unsigned long vaddr;
20840 pgd_t *pgd;
20841 + pud_t *pud;
20842 pmd_t *pmd;
20843 pte_t *pte = NULL;
20844
20845 @@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
20846 pgd = pgd_base + pgd_idx;
20847
20848 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
20849 - pmd = one_md_table_init(pgd);
20850 - pmd = pmd + pmd_index(vaddr);
20851 + pud = pud_offset(pgd, vaddr);
20852 + pmd = pmd_offset(pud, vaddr);
20853 +
20854 +#ifdef CONFIG_X86_PAE
20855 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20856 +#endif
20857 +
20858 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
20859 pmd++, pmd_idx++) {
20860 pte = page_table_kmap_check(one_page_table_init(pmd),
20861 @@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
20862 }
20863 }
20864
20865 -static inline int is_kernel_text(unsigned long addr)
20866 +static inline int is_kernel_text(unsigned long start, unsigned long end)
20867 {
20868 - if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
20869 - return 1;
20870 - return 0;
20871 + if ((start > ktla_ktva((unsigned long)_etext) ||
20872 + end <= ktla_ktva((unsigned long)_stext)) &&
20873 + (start > ktla_ktva((unsigned long)_einittext) ||
20874 + end <= ktla_ktva((unsigned long)_sinittext)) &&
20875 +
20876 +#ifdef CONFIG_ACPI_SLEEP
20877 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
20878 +#endif
20879 +
20880 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
20881 + return 0;
20882 + return 1;
20883 }
20884
20885 /*
20886 @@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
20887 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
20888 unsigned long start_pfn, end_pfn;
20889 pgd_t *pgd_base = swapper_pg_dir;
20890 - int pgd_idx, pmd_idx, pte_ofs;
20891 + unsigned int pgd_idx, pmd_idx, pte_ofs;
20892 unsigned long pfn;
20893 pgd_t *pgd;
20894 + pud_t *pud;
20895 pmd_t *pmd;
20896 pte_t *pte;
20897 unsigned pages_2m, pages_4k;
20898 @@ -278,8 +279,13 @@ repeat:
20899 pfn = start_pfn;
20900 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20901 pgd = pgd_base + pgd_idx;
20902 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
20903 - pmd = one_md_table_init(pgd);
20904 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
20905 + pud = pud_offset(pgd, 0);
20906 + pmd = pmd_offset(pud, 0);
20907 +
20908 +#ifdef CONFIG_X86_PAE
20909 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
20910 +#endif
20911
20912 if (pfn >= end_pfn)
20913 continue;
20914 @@ -291,14 +297,13 @@ repeat:
20915 #endif
20916 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
20917 pmd++, pmd_idx++) {
20918 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
20919 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
20920
20921 /*
20922 * Map with big pages if possible, otherwise
20923 * create normal page tables:
20924 */
20925 if (use_pse) {
20926 - unsigned int addr2;
20927 pgprot_t prot = PAGE_KERNEL_LARGE;
20928 /*
20929 * first pass will use the same initial
20930 @@ -308,11 +313,7 @@ repeat:
20931 __pgprot(PTE_IDENT_ATTR |
20932 _PAGE_PSE);
20933
20934 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
20935 - PAGE_OFFSET + PAGE_SIZE-1;
20936 -
20937 - if (is_kernel_text(addr) ||
20938 - is_kernel_text(addr2))
20939 + if (is_kernel_text(address, address + PMD_SIZE))
20940 prot = PAGE_KERNEL_LARGE_EXEC;
20941
20942 pages_2m++;
20943 @@ -329,7 +330,7 @@ repeat:
20944 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
20945 pte += pte_ofs;
20946 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
20947 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
20948 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
20949 pgprot_t prot = PAGE_KERNEL;
20950 /*
20951 * first pass will use the same initial
20952 @@ -337,7 +338,7 @@ repeat:
20953 */
20954 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
20955
20956 - if (is_kernel_text(addr))
20957 + if (is_kernel_text(address, address + PAGE_SIZE))
20958 prot = PAGE_KERNEL_EXEC;
20959
20960 pages_4k++;
20961 @@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
20962
20963 pud = pud_offset(pgd, va);
20964 pmd = pmd_offset(pud, va);
20965 - if (!pmd_present(*pmd))
20966 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
20967 break;
20968
20969 pte = pte_offset_kernel(pmd, va);
20970 @@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
20971
20972 static void __init pagetable_init(void)
20973 {
20974 - pgd_t *pgd_base = swapper_pg_dir;
20975 -
20976 - permanent_kmaps_init(pgd_base);
20977 + permanent_kmaps_init(swapper_pg_dir);
20978 }
20979
20980 #ifdef CONFIG_ACPI_SLEEP
20981 @@ -551,12 +550,12 @@ static void __init pagetable_init(void)
20982 * ACPI suspend needs this for resume, because things like the intel-agp
20983 * driver might have split up a kernel 4MB mapping.
20984 */
20985 -char swsusp_pg_dir[PAGE_SIZE]
20986 +pgd_t swsusp_pg_dir[PTRS_PER_PGD]
20987 __attribute__ ((aligned(PAGE_SIZE)));
20988
20989 static inline void save_pg_dir(void)
20990 {
20991 - memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
20992 + clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
20993 }
20994 #else /* !CONFIG_ACPI_SLEEP */
20995 static inline void save_pg_dir(void)
20996 @@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
20997 flush_tlb_all();
20998 }
20999
21000 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21001 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21002 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21003
21004 /* user-defined highmem size */
21005 @@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
21006 * Initialize the boot-time allocator (with low memory only):
21007 */
21008 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
21009 - bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21010 + bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21011 PAGE_SIZE);
21012 if (bootmap == -1L)
21013 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
21014 @@ -864,6 +863,12 @@ void __init mem_init(void)
21015
21016 pci_iommu_alloc();
21017
21018 +#ifdef CONFIG_PAX_PER_CPU_PGD
21019 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21020 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21021 + KERNEL_PGD_PTRS);
21022 +#endif
21023 +
21024 #ifdef CONFIG_FLATMEM
21025 BUG_ON(!mem_map);
21026 #endif
21027 @@ -881,7 +886,7 @@ void __init mem_init(void)
21028 set_highmem_pages_init();
21029
21030 codesize = (unsigned long) &_etext - (unsigned long) &_text;
21031 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
21032 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
21033 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
21034
21035 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
21036 @@ -923,10 +928,10 @@ void __init mem_init(void)
21037 ((unsigned long)&__init_end -
21038 (unsigned long)&__init_begin) >> 10,
21039
21040 - (unsigned long)&_etext, (unsigned long)&_edata,
21041 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
21042 + (unsigned long)&_sdata, (unsigned long)&_edata,
21043 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
21044
21045 - (unsigned long)&_text, (unsigned long)&_etext,
21046 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21047 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21048
21049 /*
21050 @@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
21051 if (!kernel_set_to_readonly)
21052 return;
21053
21054 + start = ktla_ktva(start);
21055 pr_debug("Set kernel text: %lx - %lx for read write\n",
21056 start, start+size);
21057
21058 @@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
21059 if (!kernel_set_to_readonly)
21060 return;
21061
21062 + start = ktla_ktva(start);
21063 pr_debug("Set kernel text: %lx - %lx for read only\n",
21064 start, start+size);
21065
21066 @@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
21067 unsigned long start = PFN_ALIGN(_text);
21068 unsigned long size = PFN_ALIGN(_etext) - start;
21069
21070 + start = ktla_ktva(start);
21071 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21072 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21073 size >> 10);
21074 diff -urNp linux-2.6.32.42/arch/x86/mm/init_64.c linux-2.6.32.42/arch/x86/mm/init_64.c
21075 --- linux-2.6.32.42/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
21076 +++ linux-2.6.32.42/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
21077 @@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21078 pmd = fill_pmd(pud, vaddr);
21079 pte = fill_pte(pmd, vaddr);
21080
21081 + pax_open_kernel();
21082 set_pte(pte, new_pte);
21083 + pax_close_kernel();
21084
21085 /*
21086 * It's enough to flush this one mapping.
21087 @@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
21088 pgd = pgd_offset_k((unsigned long)__va(phys));
21089 if (pgd_none(*pgd)) {
21090 pud = (pud_t *) spp_getpage();
21091 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21092 - _PAGE_USER));
21093 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21094 }
21095 pud = pud_offset(pgd, (unsigned long)__va(phys));
21096 if (pud_none(*pud)) {
21097 pmd = (pmd_t *) spp_getpage();
21098 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21099 - _PAGE_USER));
21100 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21101 }
21102 pmd = pmd_offset(pud, phys);
21103 BUG_ON(!pmd_none(*pmd));
21104 @@ -675,6 +675,12 @@ void __init mem_init(void)
21105
21106 pci_iommu_alloc();
21107
21108 +#ifdef CONFIG_PAX_PER_CPU_PGD
21109 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21110 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21111 + KERNEL_PGD_PTRS);
21112 +#endif
21113 +
21114 /* clear_bss() already clear the empty_zero_page */
21115
21116 reservedpages = 0;
21117 @@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
21118 static struct vm_area_struct gate_vma = {
21119 .vm_start = VSYSCALL_START,
21120 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21121 - .vm_page_prot = PAGE_READONLY_EXEC,
21122 - .vm_flags = VM_READ | VM_EXEC
21123 + .vm_page_prot = PAGE_READONLY,
21124 + .vm_flags = VM_READ
21125 };
21126
21127 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
21128 @@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
21129
21130 const char *arch_vma_name(struct vm_area_struct *vma)
21131 {
21132 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21133 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21134 return "[vdso]";
21135 if (vma == &gate_vma)
21136 return "[vsyscall]";
21137 diff -urNp linux-2.6.32.42/arch/x86/mm/init.c linux-2.6.32.42/arch/x86/mm/init.c
21138 --- linux-2.6.32.42/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
21139 +++ linux-2.6.32.42/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
21140 @@ -69,11 +69,7 @@ static void __init find_early_table_spac
21141 * cause a hotspot and fill up ZONE_DMA. The page tables
21142 * need roughly 0.5KB per GB.
21143 */
21144 -#ifdef CONFIG_X86_32
21145 - start = 0x7000;
21146 -#else
21147 - start = 0x8000;
21148 -#endif
21149 + start = 0x100000;
21150 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
21151 tables, PAGE_SIZE);
21152 if (e820_table_start == -1UL)
21153 @@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
21154 #endif
21155
21156 set_nx();
21157 - if (nx_enabled)
21158 + if (nx_enabled && cpu_has_nx)
21159 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
21160
21161 /* Enable PSE if available */
21162 @@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
21163 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
21164 * mmio resources as well as potential bios/acpi data regions.
21165 */
21166 +
21167 int devmem_is_allowed(unsigned long pagenr)
21168 {
21169 +#ifdef CONFIG_GRKERNSEC_KMEM
21170 + /* allow BDA */
21171 + if (!pagenr)
21172 + return 1;
21173 + /* allow EBDA */
21174 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
21175 + return 1;
21176 + /* allow ISA/video mem */
21177 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21178 + return 1;
21179 + /* throw out everything else below 1MB */
21180 + if (pagenr <= 256)
21181 + return 0;
21182 +#else
21183 if (pagenr <= 256)
21184 return 1;
21185 +#endif
21186 +
21187 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21188 return 0;
21189 if (!page_is_ram(pagenr))
21190 @@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
21191
21192 void free_initmem(void)
21193 {
21194 +
21195 +#ifdef CONFIG_PAX_KERNEXEC
21196 +#ifdef CONFIG_X86_32
21197 + /* PaX: limit KERNEL_CS to actual size */
21198 + unsigned long addr, limit;
21199 + struct desc_struct d;
21200 + int cpu;
21201 +
21202 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21203 + limit = (limit - 1UL) >> PAGE_SHIFT;
21204 +
21205 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21206 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
21207 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21208 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
21209 + }
21210 +
21211 + /* PaX: make KERNEL_CS read-only */
21212 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
21213 + if (!paravirt_enabled())
21214 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
21215 +/*
21216 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
21217 + pgd = pgd_offset_k(addr);
21218 + pud = pud_offset(pgd, addr);
21219 + pmd = pmd_offset(pud, addr);
21220 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21221 + }
21222 +*/
21223 +#ifdef CONFIG_X86_PAE
21224 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
21225 +/*
21226 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
21227 + pgd = pgd_offset_k(addr);
21228 + pud = pud_offset(pgd, addr);
21229 + pmd = pmd_offset(pud, addr);
21230 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21231 + }
21232 +*/
21233 +#endif
21234 +
21235 +#ifdef CONFIG_MODULES
21236 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
21237 +#endif
21238 +
21239 +#else
21240 + pgd_t *pgd;
21241 + pud_t *pud;
21242 + pmd_t *pmd;
21243 + unsigned long addr, end;
21244 +
21245 + /* PaX: make kernel code/rodata read-only, rest non-executable */
21246 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
21247 + pgd = pgd_offset_k(addr);
21248 + pud = pud_offset(pgd, addr);
21249 + pmd = pmd_offset(pud, addr);
21250 + if (!pmd_present(*pmd))
21251 + continue;
21252 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
21253 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21254 + else
21255 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21256 + }
21257 +
21258 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
21259 + end = addr + KERNEL_IMAGE_SIZE;
21260 + for (; addr < end; addr += PMD_SIZE) {
21261 + pgd = pgd_offset_k(addr);
21262 + pud = pud_offset(pgd, addr);
21263 + pmd = pmd_offset(pud, addr);
21264 + if (!pmd_present(*pmd))
21265 + continue;
21266 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
21267 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21268 + }
21269 +#endif
21270 +
21271 + flush_tlb_all();
21272 +#endif
21273 +
21274 free_init_pages("unused kernel memory",
21275 (unsigned long)(&__init_begin),
21276 (unsigned long)(&__init_end));
21277 diff -urNp linux-2.6.32.42/arch/x86/mm/iomap_32.c linux-2.6.32.42/arch/x86/mm/iomap_32.c
21278 --- linux-2.6.32.42/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
21279 +++ linux-2.6.32.42/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
21280 @@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
21281 debug_kmap_atomic(type);
21282 idx = type + KM_TYPE_NR * smp_processor_id();
21283 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21284 +
21285 + pax_open_kernel();
21286 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
21287 + pax_close_kernel();
21288 +
21289 arch_flush_lazy_mmu_mode();
21290
21291 return (void *)vaddr;
21292 diff -urNp linux-2.6.32.42/arch/x86/mm/ioremap.c linux-2.6.32.42/arch/x86/mm/ioremap.c
21293 --- linux-2.6.32.42/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
21294 +++ linux-2.6.32.42/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
21295 @@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
21296 * Second special case: Some BIOSen report the PC BIOS
21297 * area (640->1Mb) as ram even though it is not.
21298 */
21299 - if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
21300 - pagenr < (BIOS_END >> PAGE_SHIFT))
21301 + if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
21302 + pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21303 return 0;
21304
21305 for (i = 0; i < e820.nr_map; i++) {
21306 @@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
21307 /*
21308 * Don't allow anybody to remap normal RAM that we're using..
21309 */
21310 - for (pfn = phys_addr >> PAGE_SHIFT;
21311 - (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
21312 - pfn++) {
21313 -
21314 + for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
21315 int is_ram = page_is_ram(pfn);
21316
21317 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
21318 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
21319 return NULL;
21320 WARN_ON_ONCE(is_ram);
21321 }
21322 @@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
21323 early_param("early_ioremap_debug", early_ioremap_debug_setup);
21324
21325 static __initdata int after_paging_init;
21326 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
21327 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
21328
21329 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
21330 {
21331 @@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
21332 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
21333
21334 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
21335 - memset(bm_pte, 0, sizeof(bm_pte));
21336 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
21337 + pmd_populate_user(&init_mm, pmd, bm_pte);
21338
21339 /*
21340 * The boot-ioremap range spans multiple pmds, for which
21341 diff -urNp linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c
21342 --- linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
21343 +++ linux-2.6.32.42/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
21344 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
21345 * memory (e.g. tracked pages)? For now, we need this to avoid
21346 * invoking kmemcheck for PnP BIOS calls.
21347 */
21348 - if (regs->flags & X86_VM_MASK)
21349 + if (v8086_mode(regs))
21350 return false;
21351 - if (regs->cs != __KERNEL_CS)
21352 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
21353 return false;
21354
21355 pte = kmemcheck_pte_lookup(address);
21356 diff -urNp linux-2.6.32.42/arch/x86/mm/mmap.c linux-2.6.32.42/arch/x86/mm/mmap.c
21357 --- linux-2.6.32.42/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
21358 +++ linux-2.6.32.42/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
21359 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
21360 * Leave an at least ~128 MB hole with possible stack randomization.
21361 */
21362 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
21363 -#define MAX_GAP (TASK_SIZE/6*5)
21364 +#define MAX_GAP (pax_task_size/6*5)
21365
21366 /*
21367 * True on X86_32 or when emulating IA32 on X86_64
21368 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
21369 return rnd << PAGE_SHIFT;
21370 }
21371
21372 -static unsigned long mmap_base(void)
21373 +static unsigned long mmap_base(struct mm_struct *mm)
21374 {
21375 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
21376 + unsigned long pax_task_size = TASK_SIZE;
21377 +
21378 +#ifdef CONFIG_PAX_SEGMEXEC
21379 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21380 + pax_task_size = SEGMEXEC_TASK_SIZE;
21381 +#endif
21382
21383 if (gap < MIN_GAP)
21384 gap = MIN_GAP;
21385 else if (gap > MAX_GAP)
21386 gap = MAX_GAP;
21387
21388 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
21389 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
21390 }
21391
21392 /*
21393 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
21394 * does, but not when emulating X86_32
21395 */
21396 -static unsigned long mmap_legacy_base(void)
21397 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
21398 {
21399 - if (mmap_is_ia32())
21400 + if (mmap_is_ia32()) {
21401 +
21402 +#ifdef CONFIG_PAX_SEGMEXEC
21403 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
21404 + return SEGMEXEC_TASK_UNMAPPED_BASE;
21405 + else
21406 +#endif
21407 +
21408 return TASK_UNMAPPED_BASE;
21409 - else
21410 + } else
21411 return TASK_UNMAPPED_BASE + mmap_rnd();
21412 }
21413
21414 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
21415 void arch_pick_mmap_layout(struct mm_struct *mm)
21416 {
21417 if (mmap_is_legacy()) {
21418 - mm->mmap_base = mmap_legacy_base();
21419 + mm->mmap_base = mmap_legacy_base(mm);
21420 +
21421 +#ifdef CONFIG_PAX_RANDMMAP
21422 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21423 + mm->mmap_base += mm->delta_mmap;
21424 +#endif
21425 +
21426 mm->get_unmapped_area = arch_get_unmapped_area;
21427 mm->unmap_area = arch_unmap_area;
21428 } else {
21429 - mm->mmap_base = mmap_base();
21430 + mm->mmap_base = mmap_base(mm);
21431 +
21432 +#ifdef CONFIG_PAX_RANDMMAP
21433 + if (mm->pax_flags & MF_PAX_RANDMMAP)
21434 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
21435 +#endif
21436 +
21437 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
21438 mm->unmap_area = arch_unmap_area_topdown;
21439 }
21440 diff -urNp linux-2.6.32.42/arch/x86/mm/mmio-mod.c linux-2.6.32.42/arch/x86/mm/mmio-mod.c
21441 --- linux-2.6.32.42/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
21442 +++ linux-2.6.32.42/arch/x86/mm/mmio-mod.c 2011-05-04 17:56:28.000000000 -0400
21443 @@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
21444 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
21445 void __iomem *addr)
21446 {
21447 - static atomic_t next_id;
21448 + static atomic_unchecked_t next_id;
21449 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
21450 /* These are page-unaligned. */
21451 struct mmiotrace_map map = {
21452 @@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
21453 .private = trace
21454 },
21455 .phys = offset,
21456 - .id = atomic_inc_return(&next_id)
21457 + .id = atomic_inc_return_unchecked(&next_id)
21458 };
21459 map.map_id = trace->id;
21460
21461 diff -urNp linux-2.6.32.42/arch/x86/mm/numa_32.c linux-2.6.32.42/arch/x86/mm/numa_32.c
21462 --- linux-2.6.32.42/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
21463 +++ linux-2.6.32.42/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
21464 @@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
21465 }
21466 #endif
21467
21468 -extern unsigned long find_max_low_pfn(void);
21469 extern unsigned long highend_pfn, highstart_pfn;
21470
21471 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
21472 diff -urNp linux-2.6.32.42/arch/x86/mm/pageattr.c linux-2.6.32.42/arch/x86/mm/pageattr.c
21473 --- linux-2.6.32.42/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
21474 +++ linux-2.6.32.42/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
21475 @@ -261,16 +261,17 @@ static inline pgprot_t static_protection
21476 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
21477 */
21478 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
21479 - pgprot_val(forbidden) |= _PAGE_NX;
21480 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21481
21482 /*
21483 * The kernel text needs to be executable for obvious reasons
21484 * Does not cover __inittext since that is gone later on. On
21485 * 64bit we do not enforce !NX on the low mapping
21486 */
21487 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
21488 - pgprot_val(forbidden) |= _PAGE_NX;
21489 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
21490 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21491
21492 +#ifdef CONFIG_DEBUG_RODATA
21493 /*
21494 * The .rodata section needs to be read-only. Using the pfn
21495 * catches all aliases.
21496 @@ -278,6 +279,14 @@ static inline pgprot_t static_protection
21497 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
21498 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
21499 pgprot_val(forbidden) |= _PAGE_RW;
21500 +#endif
21501 +
21502 +#ifdef CONFIG_PAX_KERNEXEC
21503 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
21504 + pgprot_val(forbidden) |= _PAGE_RW;
21505 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
21506 + }
21507 +#endif
21508
21509 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
21510
21511 @@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
21512 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
21513 {
21514 /* change init_mm */
21515 + pax_open_kernel();
21516 set_pte_atomic(kpte, pte);
21517 +
21518 #ifdef CONFIG_X86_32
21519 if (!SHARED_KERNEL_PMD) {
21520 +
21521 +#ifdef CONFIG_PAX_PER_CPU_PGD
21522 + unsigned long cpu;
21523 +#else
21524 struct page *page;
21525 +#endif
21526
21527 +#ifdef CONFIG_PAX_PER_CPU_PGD
21528 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
21529 + pgd_t *pgd = get_cpu_pgd(cpu);
21530 +#else
21531 list_for_each_entry(page, &pgd_list, lru) {
21532 - pgd_t *pgd;
21533 + pgd_t *pgd = (pgd_t *)page_address(page);
21534 +#endif
21535 +
21536 pud_t *pud;
21537 pmd_t *pmd;
21538
21539 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
21540 + pgd += pgd_index(address);
21541 pud = pud_offset(pgd, address);
21542 pmd = pmd_offset(pud, address);
21543 set_pte_atomic((pte_t *)pmd, pte);
21544 }
21545 }
21546 #endif
21547 + pax_close_kernel();
21548 }
21549
21550 static int
21551 diff -urNp linux-2.6.32.42/arch/x86/mm/pageattr-test.c linux-2.6.32.42/arch/x86/mm/pageattr-test.c
21552 --- linux-2.6.32.42/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
21553 +++ linux-2.6.32.42/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
21554 @@ -36,7 +36,7 @@ enum {
21555
21556 static int pte_testbit(pte_t pte)
21557 {
21558 - return pte_flags(pte) & _PAGE_UNUSED1;
21559 + return pte_flags(pte) & _PAGE_CPA_TEST;
21560 }
21561
21562 struct split_state {
21563 diff -urNp linux-2.6.32.42/arch/x86/mm/pat.c linux-2.6.32.42/arch/x86/mm/pat.c
21564 --- linux-2.6.32.42/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
21565 +++ linux-2.6.32.42/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
21566 @@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
21567
21568 conflict:
21569 printk(KERN_INFO "%s:%d conflicting memory types "
21570 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
21571 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
21572 new->end, cattr_name(new->type), cattr_name(entry->type));
21573 return -EBUSY;
21574 }
21575 @@ -559,7 +559,7 @@ unlock_ret:
21576
21577 if (err) {
21578 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
21579 - current->comm, current->pid, start, end);
21580 + current->comm, task_pid_nr(current), start, end);
21581 }
21582
21583 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
21584 @@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
21585 while (cursor < to) {
21586 if (!devmem_is_allowed(pfn)) {
21587 printk(KERN_INFO
21588 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
21589 - current->comm, from, to);
21590 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
21591 + current->comm, from, to, cursor);
21592 return 0;
21593 }
21594 cursor += PAGE_SIZE;
21595 @@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
21596 printk(KERN_INFO
21597 "%s:%d ioremap_change_attr failed %s "
21598 "for %Lx-%Lx\n",
21599 - current->comm, current->pid,
21600 + current->comm, task_pid_nr(current),
21601 cattr_name(flags),
21602 base, (unsigned long long)(base + size));
21603 return -EINVAL;
21604 @@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
21605 free_memtype(paddr, paddr + size);
21606 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
21607 " for %Lx-%Lx, got %s\n",
21608 - current->comm, current->pid,
21609 + current->comm, task_pid_nr(current),
21610 cattr_name(want_flags),
21611 (unsigned long long)paddr,
21612 (unsigned long long)(paddr + size),
21613 diff -urNp linux-2.6.32.42/arch/x86/mm/pgtable_32.c linux-2.6.32.42/arch/x86/mm/pgtable_32.c
21614 --- linux-2.6.32.42/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
21615 +++ linux-2.6.32.42/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
21616 @@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
21617 return;
21618 }
21619 pte = pte_offset_kernel(pmd, vaddr);
21620 +
21621 + pax_open_kernel();
21622 if (pte_val(pteval))
21623 set_pte_at(&init_mm, vaddr, pte, pteval);
21624 else
21625 pte_clear(&init_mm, vaddr, pte);
21626 + pax_close_kernel();
21627
21628 /*
21629 * It's enough to flush this one mapping.
21630 diff -urNp linux-2.6.32.42/arch/x86/mm/pgtable.c linux-2.6.32.42/arch/x86/mm/pgtable.c
21631 --- linux-2.6.32.42/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
21632 +++ linux-2.6.32.42/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
21633 @@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
21634 list_del(&page->lru);
21635 }
21636
21637 -#define UNSHARED_PTRS_PER_PGD \
21638 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21639 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21640 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
21641
21642 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21643 +{
21644 + while (count--)
21645 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
21646 +}
21647 +#endif
21648 +
21649 +#ifdef CONFIG_PAX_PER_CPU_PGD
21650 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
21651 +{
21652 + while (count--)
21653 +
21654 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21655 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
21656 +#else
21657 + *dst++ = *src++;
21658 +#endif
21659 +
21660 +}
21661 +#endif
21662 +
21663 +#ifdef CONFIG_X86_64
21664 +#define pxd_t pud_t
21665 +#define pyd_t pgd_t
21666 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
21667 +#define pxd_free(mm, pud) pud_free((mm), (pud))
21668 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
21669 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
21670 +#define PYD_SIZE PGDIR_SIZE
21671 +#else
21672 +#define pxd_t pmd_t
21673 +#define pyd_t pud_t
21674 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
21675 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
21676 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
21677 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
21678 +#define PYD_SIZE PUD_SIZE
21679 +#endif
21680 +
21681 +#ifdef CONFIG_PAX_PER_CPU_PGD
21682 +static inline void pgd_ctor(pgd_t *pgd) {}
21683 +static inline void pgd_dtor(pgd_t *pgd) {}
21684 +#else
21685 static void pgd_ctor(pgd_t *pgd)
21686 {
21687 /* If the pgd points to a shared pagetable level (either the
21688 @@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
21689 pgd_list_del(pgd);
21690 spin_unlock_irqrestore(&pgd_lock, flags);
21691 }
21692 +#endif
21693
21694 /*
21695 * List of all pgd's needed for non-PAE so it can invalidate entries
21696 @@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
21697 * -- wli
21698 */
21699
21700 -#ifdef CONFIG_X86_PAE
21701 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21702 /*
21703 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
21704 * updating the top-level pagetable entries to guarantee the
21705 @@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
21706 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
21707 * and initialize the kernel pmds here.
21708 */
21709 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
21710 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
21711
21712 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
21713 {
21714 @@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
21715 */
21716 flush_tlb_mm(mm);
21717 }
21718 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
21719 +#define PREALLOCATED_PXDS USER_PGD_PTRS
21720 #else /* !CONFIG_X86_PAE */
21721
21722 /* No need to prepopulate any pagetable entries in non-PAE modes. */
21723 -#define PREALLOCATED_PMDS 0
21724 +#define PREALLOCATED_PXDS 0
21725
21726 #endif /* CONFIG_X86_PAE */
21727
21728 -static void free_pmds(pmd_t *pmds[])
21729 +static void free_pxds(pxd_t *pxds[])
21730 {
21731 int i;
21732
21733 - for(i = 0; i < PREALLOCATED_PMDS; i++)
21734 - if (pmds[i])
21735 - free_page((unsigned long)pmds[i]);
21736 + for(i = 0; i < PREALLOCATED_PXDS; i++)
21737 + if (pxds[i])
21738 + free_page((unsigned long)pxds[i]);
21739 }
21740
21741 -static int preallocate_pmds(pmd_t *pmds[])
21742 +static int preallocate_pxds(pxd_t *pxds[])
21743 {
21744 int i;
21745 bool failed = false;
21746
21747 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
21748 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
21749 - if (pmd == NULL)
21750 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
21751 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
21752 + if (pxd == NULL)
21753 failed = true;
21754 - pmds[i] = pmd;
21755 + pxds[i] = pxd;
21756 }
21757
21758 if (failed) {
21759 - free_pmds(pmds);
21760 + free_pxds(pxds);
21761 return -ENOMEM;
21762 }
21763
21764 @@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
21765 * preallocate which never got a corresponding vma will need to be
21766 * freed manually.
21767 */
21768 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
21769 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
21770 {
21771 int i;
21772
21773 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
21774 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
21775 pgd_t pgd = pgdp[i];
21776
21777 if (pgd_val(pgd) != 0) {
21778 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
21779 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
21780
21781 - pgdp[i] = native_make_pgd(0);
21782 + set_pgd(pgdp + i, native_make_pgd(0));
21783
21784 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
21785 - pmd_free(mm, pmd);
21786 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
21787 + pxd_free(mm, pxd);
21788 }
21789 }
21790 }
21791
21792 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
21793 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
21794 {
21795 - pud_t *pud;
21796 + pyd_t *pyd;
21797 unsigned long addr;
21798 int i;
21799
21800 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
21801 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
21802 return;
21803
21804 - pud = pud_offset(pgd, 0);
21805 +#ifdef CONFIG_X86_64
21806 + pyd = pyd_offset(mm, 0L);
21807 +#else
21808 + pyd = pyd_offset(pgd, 0L);
21809 +#endif
21810
21811 - for (addr = i = 0; i < PREALLOCATED_PMDS;
21812 - i++, pud++, addr += PUD_SIZE) {
21813 - pmd_t *pmd = pmds[i];
21814 + for (addr = i = 0; i < PREALLOCATED_PXDS;
21815 + i++, pyd++, addr += PYD_SIZE) {
21816 + pxd_t *pxd = pxds[i];
21817
21818 if (i >= KERNEL_PGD_BOUNDARY)
21819 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21820 - sizeof(pmd_t) * PTRS_PER_PMD);
21821 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
21822 + sizeof(pxd_t) * PTRS_PER_PMD);
21823
21824 - pud_populate(mm, pud, pmd);
21825 + pyd_populate(mm, pyd, pxd);
21826 }
21827 }
21828
21829 pgd_t *pgd_alloc(struct mm_struct *mm)
21830 {
21831 pgd_t *pgd;
21832 - pmd_t *pmds[PREALLOCATED_PMDS];
21833 + pxd_t *pxds[PREALLOCATED_PXDS];
21834 +
21835 unsigned long flags;
21836
21837 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
21838 @@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21839
21840 mm->pgd = pgd;
21841
21842 - if (preallocate_pmds(pmds) != 0)
21843 + if (preallocate_pxds(pxds) != 0)
21844 goto out_free_pgd;
21845
21846 if (paravirt_pgd_alloc(mm) != 0)
21847 - goto out_free_pmds;
21848 + goto out_free_pxds;
21849
21850 /*
21851 * Make sure that pre-populating the pmds is atomic with
21852 @@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
21853 spin_lock_irqsave(&pgd_lock, flags);
21854
21855 pgd_ctor(pgd);
21856 - pgd_prepopulate_pmd(mm, pgd, pmds);
21857 + pgd_prepopulate_pxd(mm, pgd, pxds);
21858
21859 spin_unlock_irqrestore(&pgd_lock, flags);
21860
21861 return pgd;
21862
21863 -out_free_pmds:
21864 - free_pmds(pmds);
21865 +out_free_pxds:
21866 + free_pxds(pxds);
21867 out_free_pgd:
21868 free_page((unsigned long)pgd);
21869 out:
21870 @@ -287,7 +338,7 @@ out:
21871
21872 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
21873 {
21874 - pgd_mop_up_pmds(mm, pgd);
21875 + pgd_mop_up_pxds(mm, pgd);
21876 pgd_dtor(pgd);
21877 paravirt_pgd_free(mm, pgd);
21878 free_page((unsigned long)pgd);
21879 diff -urNp linux-2.6.32.42/arch/x86/mm/setup_nx.c linux-2.6.32.42/arch/x86/mm/setup_nx.c
21880 --- linux-2.6.32.42/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
21881 +++ linux-2.6.32.42/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
21882 @@ -4,11 +4,10 @@
21883
21884 #include <asm/pgtable.h>
21885
21886 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21887 int nx_enabled;
21888
21889 -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21890 -static int disable_nx __cpuinitdata;
21891 -
21892 +#ifndef CONFIG_PAX_PAGEEXEC
21893 /*
21894 * noexec = on|off
21895 *
21896 @@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
21897 if (!str)
21898 return -EINVAL;
21899 if (!strncmp(str, "on", 2)) {
21900 - __supported_pte_mask |= _PAGE_NX;
21901 - disable_nx = 0;
21902 + nx_enabled = 1;
21903 } else if (!strncmp(str, "off", 3)) {
21904 - disable_nx = 1;
21905 - __supported_pte_mask &= ~_PAGE_NX;
21906 + nx_enabled = 0;
21907 }
21908 return 0;
21909 }
21910 early_param("noexec", noexec_setup);
21911 #endif
21912 +#endif
21913
21914 #ifdef CONFIG_X86_PAE
21915 void __init set_nx(void)
21916 {
21917 - unsigned int v[4], l, h;
21918 + if (!nx_enabled && cpu_has_nx) {
21919 + unsigned l, h;
21920
21921 - if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
21922 - cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
21923 -
21924 - if ((v[3] & (1 << 20)) && !disable_nx) {
21925 - rdmsr(MSR_EFER, l, h);
21926 - l |= EFER_NX;
21927 - wrmsr(MSR_EFER, l, h);
21928 - nx_enabled = 1;
21929 - __supported_pte_mask |= _PAGE_NX;
21930 - }
21931 + __supported_pte_mask &= ~_PAGE_NX;
21932 + rdmsr(MSR_EFER, l, h);
21933 + l &= ~EFER_NX;
21934 + wrmsr(MSR_EFER, l, h);
21935 }
21936 }
21937 #else
21938 @@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
21939 unsigned long efer;
21940
21941 rdmsrl(MSR_EFER, efer);
21942 - if (!(efer & EFER_NX) || disable_nx)
21943 + if (!(efer & EFER_NX) || !nx_enabled)
21944 __supported_pte_mask &= ~_PAGE_NX;
21945 }
21946 #endif
21947 diff -urNp linux-2.6.32.42/arch/x86/mm/tlb.c linux-2.6.32.42/arch/x86/mm/tlb.c
21948 --- linux-2.6.32.42/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
21949 +++ linux-2.6.32.42/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
21950 @@ -61,7 +61,11 @@ void leave_mm(int cpu)
21951 BUG();
21952 cpumask_clear_cpu(cpu,
21953 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
21954 +
21955 +#ifndef CONFIG_PAX_PER_CPU_PGD
21956 load_cr3(swapper_pg_dir);
21957 +#endif
21958 +
21959 }
21960 EXPORT_SYMBOL_GPL(leave_mm);
21961
21962 diff -urNp linux-2.6.32.42/arch/x86/oprofile/backtrace.c linux-2.6.32.42/arch/x86/oprofile/backtrace.c
21963 --- linux-2.6.32.42/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
21964 +++ linux-2.6.32.42/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
21965 @@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
21966 struct frame_head bufhead[2];
21967
21968 /* Also check accessibility of one struct frame_head beyond */
21969 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
21970 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
21971 return NULL;
21972 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
21973 return NULL;
21974 @@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
21975 {
21976 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
21977
21978 - if (!user_mode_vm(regs)) {
21979 + if (!user_mode(regs)) {
21980 unsigned long stack = kernel_stack_pointer(regs);
21981 if (depth)
21982 dump_trace(NULL, regs, (unsigned long *)stack, 0,
21983 diff -urNp linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c
21984 --- linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
21985 +++ linux-2.6.32.42/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
21986 @@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
21987 #endif
21988 }
21989
21990 -static int inline addr_increment(void)
21991 +static inline int addr_increment(void)
21992 {
21993 #ifdef CONFIG_SMP
21994 return smp_num_siblings == 2 ? 2 : 1;
21995 diff -urNp linux-2.6.32.42/arch/x86/pci/common.c linux-2.6.32.42/arch/x86/pci/common.c
21996 --- linux-2.6.32.42/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
21997 +++ linux-2.6.32.42/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
21998 @@ -31,8 +31,8 @@ int noioapicreroute = 1;
21999 int pcibios_last_bus = -1;
22000 unsigned long pirq_table_addr;
22001 struct pci_bus *pci_root_bus;
22002 -struct pci_raw_ops *raw_pci_ops;
22003 -struct pci_raw_ops *raw_pci_ext_ops;
22004 +const struct pci_raw_ops *raw_pci_ops;
22005 +const struct pci_raw_ops *raw_pci_ext_ops;
22006
22007 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
22008 int reg, int len, u32 *val)
22009 diff -urNp linux-2.6.32.42/arch/x86/pci/direct.c linux-2.6.32.42/arch/x86/pci/direct.c
22010 --- linux-2.6.32.42/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
22011 +++ linux-2.6.32.42/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
22012 @@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
22013
22014 #undef PCI_CONF1_ADDRESS
22015
22016 -struct pci_raw_ops pci_direct_conf1 = {
22017 +const struct pci_raw_ops pci_direct_conf1 = {
22018 .read = pci_conf1_read,
22019 .write = pci_conf1_write,
22020 };
22021 @@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
22022
22023 #undef PCI_CONF2_ADDRESS
22024
22025 -struct pci_raw_ops pci_direct_conf2 = {
22026 +const struct pci_raw_ops pci_direct_conf2 = {
22027 .read = pci_conf2_read,
22028 .write = pci_conf2_write,
22029 };
22030 @@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
22031 * This should be close to trivial, but it isn't, because there are buggy
22032 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
22033 */
22034 -static int __init pci_sanity_check(struct pci_raw_ops *o)
22035 +static int __init pci_sanity_check(const struct pci_raw_ops *o)
22036 {
22037 u32 x = 0;
22038 int year, devfn;
22039 diff -urNp linux-2.6.32.42/arch/x86/pci/mmconfig_32.c linux-2.6.32.42/arch/x86/pci/mmconfig_32.c
22040 --- linux-2.6.32.42/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
22041 +++ linux-2.6.32.42/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
22042 @@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
22043 return 0;
22044 }
22045
22046 -static struct pci_raw_ops pci_mmcfg = {
22047 +static const struct pci_raw_ops pci_mmcfg = {
22048 .read = pci_mmcfg_read,
22049 .write = pci_mmcfg_write,
22050 };
22051 diff -urNp linux-2.6.32.42/arch/x86/pci/mmconfig_64.c linux-2.6.32.42/arch/x86/pci/mmconfig_64.c
22052 --- linux-2.6.32.42/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
22053 +++ linux-2.6.32.42/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
22054 @@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
22055 return 0;
22056 }
22057
22058 -static struct pci_raw_ops pci_mmcfg = {
22059 +static const struct pci_raw_ops pci_mmcfg = {
22060 .read = pci_mmcfg_read,
22061 .write = pci_mmcfg_write,
22062 };
22063 diff -urNp linux-2.6.32.42/arch/x86/pci/numaq_32.c linux-2.6.32.42/arch/x86/pci/numaq_32.c
22064 --- linux-2.6.32.42/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
22065 +++ linux-2.6.32.42/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
22066 @@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
22067
22068 #undef PCI_CONF1_MQ_ADDRESS
22069
22070 -static struct pci_raw_ops pci_direct_conf1_mq = {
22071 +static const struct pci_raw_ops pci_direct_conf1_mq = {
22072 .read = pci_conf1_mq_read,
22073 .write = pci_conf1_mq_write
22074 };
22075 diff -urNp linux-2.6.32.42/arch/x86/pci/olpc.c linux-2.6.32.42/arch/x86/pci/olpc.c
22076 --- linux-2.6.32.42/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
22077 +++ linux-2.6.32.42/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
22078 @@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
22079 return 0;
22080 }
22081
22082 -static struct pci_raw_ops pci_olpc_conf = {
22083 +static const struct pci_raw_ops pci_olpc_conf = {
22084 .read = pci_olpc_read,
22085 .write = pci_olpc_write,
22086 };
22087 diff -urNp linux-2.6.32.42/arch/x86/pci/pcbios.c linux-2.6.32.42/arch/x86/pci/pcbios.c
22088 --- linux-2.6.32.42/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
22089 +++ linux-2.6.32.42/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
22090 @@ -56,50 +56,93 @@ union bios32 {
22091 static struct {
22092 unsigned long address;
22093 unsigned short segment;
22094 -} bios32_indirect = { 0, __KERNEL_CS };
22095 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22096
22097 /*
22098 * Returns the entry point for the given service, NULL on error
22099 */
22100
22101 -static unsigned long bios32_service(unsigned long service)
22102 +static unsigned long __devinit bios32_service(unsigned long service)
22103 {
22104 unsigned char return_code; /* %al */
22105 unsigned long address; /* %ebx */
22106 unsigned long length; /* %ecx */
22107 unsigned long entry; /* %edx */
22108 unsigned long flags;
22109 + struct desc_struct d, *gdt;
22110
22111 local_irq_save(flags);
22112 - __asm__("lcall *(%%edi); cld"
22113 +
22114 + gdt = get_cpu_gdt_table(smp_processor_id());
22115 +
22116 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
22117 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22118 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
22119 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22120 +
22121 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
22122 : "=a" (return_code),
22123 "=b" (address),
22124 "=c" (length),
22125 "=d" (entry)
22126 : "0" (service),
22127 "1" (0),
22128 - "D" (&bios32_indirect));
22129 + "D" (&bios32_indirect),
22130 + "r"(__PCIBIOS_DS)
22131 + : "memory");
22132 +
22133 + pax_open_kernel();
22134 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
22135 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
22136 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
22137 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
22138 + pax_close_kernel();
22139 +
22140 local_irq_restore(flags);
22141
22142 switch (return_code) {
22143 - case 0:
22144 - return address + entry;
22145 - case 0x80: /* Not present */
22146 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22147 - return 0;
22148 - default: /* Shouldn't happen */
22149 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22150 - service, return_code);
22151 + case 0: {
22152 + int cpu;
22153 + unsigned char flags;
22154 +
22155 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
22156 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
22157 + printk(KERN_WARNING "bios32_service: not valid\n");
22158 return 0;
22159 + }
22160 + address = address + PAGE_OFFSET;
22161 + length += 16UL; /* some BIOSs underreport this... */
22162 + flags = 4;
22163 + if (length >= 64*1024*1024) {
22164 + length >>= PAGE_SHIFT;
22165 + flags |= 8;
22166 + }
22167 +
22168 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
22169 + gdt = get_cpu_gdt_table(cpu);
22170 + pack_descriptor(&d, address, length, 0x9b, flags);
22171 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22172 + pack_descriptor(&d, address, length, 0x93, flags);
22173 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22174 + }
22175 + return entry;
22176 + }
22177 + case 0x80: /* Not present */
22178 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22179 + return 0;
22180 + default: /* Shouldn't happen */
22181 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22182 + service, return_code);
22183 + return 0;
22184 }
22185 }
22186
22187 static struct {
22188 unsigned long address;
22189 unsigned short segment;
22190 -} pci_indirect = { 0, __KERNEL_CS };
22191 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
22192
22193 -static int pci_bios_present;
22194 +static int pci_bios_present __read_only;
22195
22196 static int __devinit check_pcibios(void)
22197 {
22198 @@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
22199 unsigned long flags, pcibios_entry;
22200
22201 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
22202 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
22203 + pci_indirect.address = pcibios_entry;
22204
22205 local_irq_save(flags);
22206 - __asm__(
22207 - "lcall *(%%edi); cld\n\t"
22208 + __asm__("movw %w6, %%ds\n\t"
22209 + "lcall *%%ss:(%%edi); cld\n\t"
22210 + "push %%ss\n\t"
22211 + "pop %%ds\n\t"
22212 "jc 1f\n\t"
22213 "xor %%ah, %%ah\n"
22214 "1:"
22215 @@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
22216 "=b" (ebx),
22217 "=c" (ecx)
22218 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
22219 - "D" (&pci_indirect)
22220 + "D" (&pci_indirect),
22221 + "r" (__PCIBIOS_DS)
22222 : "memory");
22223 local_irq_restore(flags);
22224
22225 @@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
22226
22227 switch (len) {
22228 case 1:
22229 - __asm__("lcall *(%%esi); cld\n\t"
22230 + __asm__("movw %w6, %%ds\n\t"
22231 + "lcall *%%ss:(%%esi); cld\n\t"
22232 + "push %%ss\n\t"
22233 + "pop %%ds\n\t"
22234 "jc 1f\n\t"
22235 "xor %%ah, %%ah\n"
22236 "1:"
22237 @@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
22238 : "1" (PCIBIOS_READ_CONFIG_BYTE),
22239 "b" (bx),
22240 "D" ((long)reg),
22241 - "S" (&pci_indirect));
22242 + "S" (&pci_indirect),
22243 + "r" (__PCIBIOS_DS));
22244 /*
22245 * Zero-extend the result beyond 8 bits, do not trust the
22246 * BIOS having done it:
22247 @@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
22248 *value &= 0xff;
22249 break;
22250 case 2:
22251 - __asm__("lcall *(%%esi); cld\n\t"
22252 + __asm__("movw %w6, %%ds\n\t"
22253 + "lcall *%%ss:(%%esi); cld\n\t"
22254 + "push %%ss\n\t"
22255 + "pop %%ds\n\t"
22256 "jc 1f\n\t"
22257 "xor %%ah, %%ah\n"
22258 "1:"
22259 @@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
22260 : "1" (PCIBIOS_READ_CONFIG_WORD),
22261 "b" (bx),
22262 "D" ((long)reg),
22263 - "S" (&pci_indirect));
22264 + "S" (&pci_indirect),
22265 + "r" (__PCIBIOS_DS));
22266 /*
22267 * Zero-extend the result beyond 16 bits, do not trust the
22268 * BIOS having done it:
22269 @@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
22270 *value &= 0xffff;
22271 break;
22272 case 4:
22273 - __asm__("lcall *(%%esi); cld\n\t"
22274 + __asm__("movw %w6, %%ds\n\t"
22275 + "lcall *%%ss:(%%esi); cld\n\t"
22276 + "push %%ss\n\t"
22277 + "pop %%ds\n\t"
22278 "jc 1f\n\t"
22279 "xor %%ah, %%ah\n"
22280 "1:"
22281 @@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
22282 : "1" (PCIBIOS_READ_CONFIG_DWORD),
22283 "b" (bx),
22284 "D" ((long)reg),
22285 - "S" (&pci_indirect));
22286 + "S" (&pci_indirect),
22287 + "r" (__PCIBIOS_DS));
22288 break;
22289 }
22290
22291 @@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
22292
22293 switch (len) {
22294 case 1:
22295 - __asm__("lcall *(%%esi); cld\n\t"
22296 + __asm__("movw %w6, %%ds\n\t"
22297 + "lcall *%%ss:(%%esi); cld\n\t"
22298 + "push %%ss\n\t"
22299 + "pop %%ds\n\t"
22300 "jc 1f\n\t"
22301 "xor %%ah, %%ah\n"
22302 "1:"
22303 @@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
22304 "c" (value),
22305 "b" (bx),
22306 "D" ((long)reg),
22307 - "S" (&pci_indirect));
22308 + "S" (&pci_indirect),
22309 + "r" (__PCIBIOS_DS));
22310 break;
22311 case 2:
22312 - __asm__("lcall *(%%esi); cld\n\t"
22313 + __asm__("movw %w6, %%ds\n\t"
22314 + "lcall *%%ss:(%%esi); cld\n\t"
22315 + "push %%ss\n\t"
22316 + "pop %%ds\n\t"
22317 "jc 1f\n\t"
22318 "xor %%ah, %%ah\n"
22319 "1:"
22320 @@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
22321 "c" (value),
22322 "b" (bx),
22323 "D" ((long)reg),
22324 - "S" (&pci_indirect));
22325 + "S" (&pci_indirect),
22326 + "r" (__PCIBIOS_DS));
22327 break;
22328 case 4:
22329 - __asm__("lcall *(%%esi); cld\n\t"
22330 + __asm__("movw %w6, %%ds\n\t"
22331 + "lcall *%%ss:(%%esi); cld\n\t"
22332 + "push %%ss\n\t"
22333 + "pop %%ds\n\t"
22334 "jc 1f\n\t"
22335 "xor %%ah, %%ah\n"
22336 "1:"
22337 @@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
22338 "c" (value),
22339 "b" (bx),
22340 "D" ((long)reg),
22341 - "S" (&pci_indirect));
22342 + "S" (&pci_indirect),
22343 + "r" (__PCIBIOS_DS));
22344 break;
22345 }
22346
22347 @@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
22348 * Function table for BIOS32 access
22349 */
22350
22351 -static struct pci_raw_ops pci_bios_access = {
22352 +static const struct pci_raw_ops pci_bios_access = {
22353 .read = pci_bios_read,
22354 .write = pci_bios_write
22355 };
22356 @@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
22357 * Try to find PCI BIOS.
22358 */
22359
22360 -static struct pci_raw_ops * __devinit pci_find_bios(void)
22361 +static const struct pci_raw_ops * __devinit pci_find_bios(void)
22362 {
22363 union bios32 *check;
22364 unsigned char sum;
22365 @@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
22366
22367 DBG("PCI: Fetching IRQ routing table... ");
22368 __asm__("push %%es\n\t"
22369 + "movw %w8, %%ds\n\t"
22370 "push %%ds\n\t"
22371 "pop %%es\n\t"
22372 - "lcall *(%%esi); cld\n\t"
22373 + "lcall *%%ss:(%%esi); cld\n\t"
22374 "pop %%es\n\t"
22375 + "push %%ss\n\t"
22376 + "pop %%ds\n"
22377 "jc 1f\n\t"
22378 "xor %%ah, %%ah\n"
22379 "1:"
22380 @@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
22381 "1" (0),
22382 "D" ((long) &opt),
22383 "S" (&pci_indirect),
22384 - "m" (opt)
22385 + "m" (opt),
22386 + "r" (__PCIBIOS_DS)
22387 : "memory");
22388 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
22389 if (ret & 0xff00)
22390 @@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
22391 {
22392 int ret;
22393
22394 - __asm__("lcall *(%%esi); cld\n\t"
22395 + __asm__("movw %w5, %%ds\n\t"
22396 + "lcall *%%ss:(%%esi); cld\n\t"
22397 + "push %%ss\n\t"
22398 + "pop %%ds\n"
22399 "jc 1f\n\t"
22400 "xor %%ah, %%ah\n"
22401 "1:"
22402 @@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
22403 : "0" (PCIBIOS_SET_PCI_HW_INT),
22404 "b" ((dev->bus->number << 8) | dev->devfn),
22405 "c" ((irq << 8) | (pin + 10)),
22406 - "S" (&pci_indirect));
22407 + "S" (&pci_indirect),
22408 + "r" (__PCIBIOS_DS));
22409 return !(ret & 0xff00);
22410 }
22411 EXPORT_SYMBOL(pcibios_set_irq_routing);
22412 diff -urNp linux-2.6.32.42/arch/x86/power/cpu.c linux-2.6.32.42/arch/x86/power/cpu.c
22413 --- linux-2.6.32.42/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
22414 +++ linux-2.6.32.42/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
22415 @@ -129,7 +129,7 @@ static void do_fpu_end(void)
22416 static void fix_processor_context(void)
22417 {
22418 int cpu = smp_processor_id();
22419 - struct tss_struct *t = &per_cpu(init_tss, cpu);
22420 + struct tss_struct *t = init_tss + cpu;
22421
22422 set_tss_desc(cpu, t); /*
22423 * This just modifies memory; should not be
22424 @@ -139,7 +139,9 @@ static void fix_processor_context(void)
22425 */
22426
22427 #ifdef CONFIG_X86_64
22428 + pax_open_kernel();
22429 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
22430 + pax_close_kernel();
22431
22432 syscall_init(); /* This sets MSR_*STAR and related */
22433 #endif
22434 diff -urNp linux-2.6.32.42/arch/x86/vdso/Makefile linux-2.6.32.42/arch/x86/vdso/Makefile
22435 --- linux-2.6.32.42/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
22436 +++ linux-2.6.32.42/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
22437 @@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
22438 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
22439 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
22440
22441 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22442 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
22443 GCOV_PROFILE := n
22444
22445 #
22446 diff -urNp linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c
22447 --- linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
22448 +++ linux-2.6.32.42/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
22449 @@ -22,24 +22,48 @@
22450 #include <asm/hpet.h>
22451 #include <asm/unistd.h>
22452 #include <asm/io.h>
22453 +#include <asm/fixmap.h>
22454 #include "vextern.h"
22455
22456 #define gtod vdso_vsyscall_gtod_data
22457
22458 +notrace noinline long __vdso_fallback_time(long *t)
22459 +{
22460 + long secs;
22461 + asm volatile("syscall"
22462 + : "=a" (secs)
22463 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
22464 + return secs;
22465 +}
22466 +
22467 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
22468 {
22469 long ret;
22470 asm("syscall" : "=a" (ret) :
22471 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
22472 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
22473 return ret;
22474 }
22475
22476 +notrace static inline cycle_t __vdso_vread_hpet(void)
22477 +{
22478 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
22479 +}
22480 +
22481 +notrace static inline cycle_t __vdso_vread_tsc(void)
22482 +{
22483 + cycle_t ret = (cycle_t)vget_cycles();
22484 +
22485 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
22486 +}
22487 +
22488 notrace static inline long vgetns(void)
22489 {
22490 long v;
22491 - cycles_t (*vread)(void);
22492 - vread = gtod->clock.vread;
22493 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
22494 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
22495 + v = __vdso_vread_tsc();
22496 + else
22497 + v = __vdso_vread_hpet();
22498 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
22499 return (v * gtod->clock.mult) >> gtod->clock.shift;
22500 }
22501
22502 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
22503
22504 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
22505 {
22506 - if (likely(gtod->sysctl_enabled))
22507 + if (likely(gtod->sysctl_enabled &&
22508 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
22509 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
22510 switch (clock) {
22511 case CLOCK_REALTIME:
22512 if (likely(gtod->clock.vread))
22513 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
22514 int clock_gettime(clockid_t, struct timespec *)
22515 __attribute__((weak, alias("__vdso_clock_gettime")));
22516
22517 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
22518 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
22519 {
22520 long ret;
22521 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
22522 + asm("syscall" : "=a" (ret) :
22523 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
22524 + return ret;
22525 +}
22526 +
22527 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
22528 +{
22529 + if (likely(gtod->sysctl_enabled &&
22530 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
22531 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
22532 + {
22533 if (likely(tv != NULL)) {
22534 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
22535 offsetof(struct timespec, tv_nsec) ||
22536 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
22537 }
22538 return 0;
22539 }
22540 - asm("syscall" : "=a" (ret) :
22541 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
22542 - return ret;
22543 + return __vdso_fallback_gettimeofday(tv, tz);
22544 }
22545 int gettimeofday(struct timeval *, struct timezone *)
22546 __attribute__((weak, alias("__vdso_gettimeofday")));
22547 diff -urNp linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c
22548 --- linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
22549 +++ linux-2.6.32.42/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
22550 @@ -25,6 +25,7 @@
22551 #include <asm/tlbflush.h>
22552 #include <asm/vdso.h>
22553 #include <asm/proto.h>
22554 +#include <asm/mman.h>
22555
22556 enum {
22557 VDSO_DISABLED = 0,
22558 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
22559 void enable_sep_cpu(void)
22560 {
22561 int cpu = get_cpu();
22562 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
22563 + struct tss_struct *tss = init_tss + cpu;
22564
22565 if (!boot_cpu_has(X86_FEATURE_SEP)) {
22566 put_cpu();
22567 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
22568 gate_vma.vm_start = FIXADDR_USER_START;
22569 gate_vma.vm_end = FIXADDR_USER_END;
22570 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
22571 - gate_vma.vm_page_prot = __P101;
22572 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
22573 /*
22574 * Make sure the vDSO gets into every core dump.
22575 * Dumping its contents makes post-mortem fully interpretable later
22576 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
22577 if (compat)
22578 addr = VDSO_HIGH_BASE;
22579 else {
22580 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
22581 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
22582 if (IS_ERR_VALUE(addr)) {
22583 ret = addr;
22584 goto up_fail;
22585 }
22586 }
22587
22588 - current->mm->context.vdso = (void *)addr;
22589 + current->mm->context.vdso = addr;
22590
22591 if (compat_uses_vma || !compat) {
22592 /*
22593 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
22594 }
22595
22596 current_thread_info()->sysenter_return =
22597 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22598 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
22599
22600 up_fail:
22601 if (ret)
22602 - current->mm->context.vdso = NULL;
22603 + current->mm->context.vdso = 0;
22604
22605 up_write(&mm->mmap_sem);
22606
22607 @@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
22608
22609 const char *arch_vma_name(struct vm_area_struct *vma)
22610 {
22611 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
22612 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
22613 return "[vdso]";
22614 +
22615 +#ifdef CONFIG_PAX_SEGMEXEC
22616 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
22617 + return "[vdso]";
22618 +#endif
22619 +
22620 return NULL;
22621 }
22622
22623 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
22624 struct mm_struct *mm = tsk->mm;
22625
22626 /* Check to see if this task was created in compat vdso mode */
22627 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
22628 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
22629 return &gate_vma;
22630 return NULL;
22631 }
22632 diff -urNp linux-2.6.32.42/arch/x86/vdso/vdso.lds.S linux-2.6.32.42/arch/x86/vdso/vdso.lds.S
22633 --- linux-2.6.32.42/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
22634 +++ linux-2.6.32.42/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
22635 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
22636 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
22637 #include "vextern.h"
22638 #undef VEXTERN
22639 +
22640 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
22641 +VEXTERN(fallback_gettimeofday)
22642 +VEXTERN(fallback_time)
22643 +VEXTERN(getcpu)
22644 +#undef VEXTERN
22645 diff -urNp linux-2.6.32.42/arch/x86/vdso/vextern.h linux-2.6.32.42/arch/x86/vdso/vextern.h
22646 --- linux-2.6.32.42/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
22647 +++ linux-2.6.32.42/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
22648 @@ -11,6 +11,5 @@
22649 put into vextern.h and be referenced as a pointer with vdso prefix.
22650 The main kernel later fills in the values. */
22651
22652 -VEXTERN(jiffies)
22653 VEXTERN(vgetcpu_mode)
22654 VEXTERN(vsyscall_gtod_data)
22655 diff -urNp linux-2.6.32.42/arch/x86/vdso/vma.c linux-2.6.32.42/arch/x86/vdso/vma.c
22656 --- linux-2.6.32.42/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
22657 +++ linux-2.6.32.42/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
22658 @@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
22659 if (!vbase)
22660 goto oom;
22661
22662 - if (memcmp(vbase, "\177ELF", 4)) {
22663 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
22664 printk("VDSO: I'm broken; not ELF\n");
22665 vdso_enabled = 0;
22666 }
22667 @@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
22668 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
22669 #include "vextern.h"
22670 #undef VEXTERN
22671 + vunmap(vbase);
22672 return 0;
22673
22674 oom:
22675 @@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
22676 goto up_fail;
22677 }
22678
22679 - current->mm->context.vdso = (void *)addr;
22680 + current->mm->context.vdso = addr;
22681
22682 ret = install_special_mapping(mm, addr, vdso_size,
22683 VM_READ|VM_EXEC|
22684 @@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
22685 VM_ALWAYSDUMP,
22686 vdso_pages);
22687 if (ret) {
22688 - current->mm->context.vdso = NULL;
22689 + current->mm->context.vdso = 0;
22690 goto up_fail;
22691 }
22692
22693 @@ -132,10 +133,3 @@ up_fail:
22694 up_write(&mm->mmap_sem);
22695 return ret;
22696 }
22697 -
22698 -static __init int vdso_setup(char *s)
22699 -{
22700 - vdso_enabled = simple_strtoul(s, NULL, 0);
22701 - return 0;
22702 -}
22703 -__setup("vdso=", vdso_setup);
22704 diff -urNp linux-2.6.32.42/arch/x86/xen/enlighten.c linux-2.6.32.42/arch/x86/xen/enlighten.c
22705 --- linux-2.6.32.42/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
22706 +++ linux-2.6.32.42/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
22707 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
22708
22709 struct shared_info xen_dummy_shared_info;
22710
22711 -void *xen_initial_gdt;
22712 -
22713 /*
22714 * Point at some empty memory to start with. We map the real shared_info
22715 * page as soon as fixmap is up and running.
22716 @@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
22717
22718 preempt_disable();
22719
22720 - start = __get_cpu_var(idt_desc).address;
22721 + start = (unsigned long)__get_cpu_var(idt_desc).address;
22722 end = start + __get_cpu_var(idt_desc).size + 1;
22723
22724 xen_mc_flush();
22725 @@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
22726 #endif
22727 };
22728
22729 -static void xen_reboot(int reason)
22730 +static __noreturn void xen_reboot(int reason)
22731 {
22732 struct sched_shutdown r = { .reason = reason };
22733
22734 @@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
22735 BUG();
22736 }
22737
22738 -static void xen_restart(char *msg)
22739 +static __noreturn void xen_restart(char *msg)
22740 {
22741 xen_reboot(SHUTDOWN_reboot);
22742 }
22743
22744 -static void xen_emergency_restart(void)
22745 +static __noreturn void xen_emergency_restart(void)
22746 {
22747 xen_reboot(SHUTDOWN_reboot);
22748 }
22749
22750 -static void xen_machine_halt(void)
22751 +static __noreturn void xen_machine_halt(void)
22752 {
22753 xen_reboot(SHUTDOWN_poweroff);
22754 }
22755 @@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
22756 */
22757 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
22758
22759 -#ifdef CONFIG_X86_64
22760 /* Work out if we support NX */
22761 - check_efer();
22762 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22763 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
22764 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
22765 + unsigned l, h;
22766 +
22767 +#ifdef CONFIG_X86_PAE
22768 + nx_enabled = 1;
22769 +#endif
22770 + __supported_pte_mask |= _PAGE_NX;
22771 + rdmsr(MSR_EFER, l, h);
22772 + l |= EFER_NX;
22773 + wrmsr(MSR_EFER, l, h);
22774 + }
22775 #endif
22776
22777 xen_setup_features();
22778 @@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
22779
22780 machine_ops = xen_machine_ops;
22781
22782 - /*
22783 - * The only reliable way to retain the initial address of the
22784 - * percpu gdt_page is to remember it here, so we can go and
22785 - * mark it RW later, when the initial percpu area is freed.
22786 - */
22787 - xen_initial_gdt = &per_cpu(gdt_page, 0);
22788 -
22789 xen_smp_init();
22790
22791 pgd = (pgd_t *)xen_start_info->pt_base;
22792 diff -urNp linux-2.6.32.42/arch/x86/xen/mmu.c linux-2.6.32.42/arch/x86/xen/mmu.c
22793 --- linux-2.6.32.42/arch/x86/xen/mmu.c 2011-06-25 12:55:34.000000000 -0400
22794 +++ linux-2.6.32.42/arch/x86/xen/mmu.c 2011-06-25 12:56:37.000000000 -0400
22795 @@ -1714,6 +1714,8 @@ __init pgd_t *xen_setup_kernel_pagetable
22796 convert_pfn_mfn(init_level4_pgt);
22797 convert_pfn_mfn(level3_ident_pgt);
22798 convert_pfn_mfn(level3_kernel_pgt);
22799 + convert_pfn_mfn(level3_vmalloc_pgt);
22800 + convert_pfn_mfn(level3_vmemmap_pgt);
22801
22802 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
22803 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
22804 @@ -1732,7 +1734,10 @@ __init pgd_t *xen_setup_kernel_pagetable
22805 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
22806 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
22807 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
22808 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
22809 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
22810 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
22811 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
22812 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
22813 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
22814
22815 diff -urNp linux-2.6.32.42/arch/x86/xen/smp.c linux-2.6.32.42/arch/x86/xen/smp.c
22816 --- linux-2.6.32.42/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
22817 +++ linux-2.6.32.42/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
22818 @@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
22819 {
22820 BUG_ON(smp_processor_id() != 0);
22821 native_smp_prepare_boot_cpu();
22822 -
22823 - /* We've switched to the "real" per-cpu gdt, so make sure the
22824 - old memory can be recycled */
22825 - make_lowmem_page_readwrite(xen_initial_gdt);
22826 -
22827 xen_setup_vcpu_info_placement();
22828 }
22829
22830 @@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
22831 gdt = get_cpu_gdt_table(cpu);
22832
22833 ctxt->flags = VGCF_IN_KERNEL;
22834 - ctxt->user_regs.ds = __USER_DS;
22835 - ctxt->user_regs.es = __USER_DS;
22836 + ctxt->user_regs.ds = __KERNEL_DS;
22837 + ctxt->user_regs.es = __KERNEL_DS;
22838 ctxt->user_regs.ss = __KERNEL_DS;
22839 #ifdef CONFIG_X86_32
22840 ctxt->user_regs.fs = __KERNEL_PERCPU;
22841 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
22842 + savesegment(gs, ctxt->user_regs.gs);
22843 #else
22844 ctxt->gs_base_kernel = per_cpu_offset(cpu);
22845 #endif
22846 @@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
22847 int rc;
22848
22849 per_cpu(current_task, cpu) = idle;
22850 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
22851 #ifdef CONFIG_X86_32
22852 irq_ctx_init(cpu);
22853 #else
22854 clear_tsk_thread_flag(idle, TIF_FORK);
22855 - per_cpu(kernel_stack, cpu) =
22856 - (unsigned long)task_stack_page(idle) -
22857 - KERNEL_STACK_OFFSET + THREAD_SIZE;
22858 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22859 #endif
22860 xen_setup_runstate_info(cpu);
22861 xen_setup_timer(cpu);
22862 diff -urNp linux-2.6.32.42/arch/x86/xen/xen-asm_32.S linux-2.6.32.42/arch/x86/xen/xen-asm_32.S
22863 --- linux-2.6.32.42/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
22864 +++ linux-2.6.32.42/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
22865 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
22866 ESP_OFFSET=4 # bytes pushed onto stack
22867
22868 /*
22869 - * Store vcpu_info pointer for easy access. Do it this way to
22870 - * avoid having to reload %fs
22871 + * Store vcpu_info pointer for easy access.
22872 */
22873 #ifdef CONFIG_SMP
22874 - GET_THREAD_INFO(%eax)
22875 - movl TI_cpu(%eax), %eax
22876 - movl __per_cpu_offset(,%eax,4), %eax
22877 - mov per_cpu__xen_vcpu(%eax), %eax
22878 + push %fs
22879 + mov $(__KERNEL_PERCPU), %eax
22880 + mov %eax, %fs
22881 + mov PER_CPU_VAR(xen_vcpu), %eax
22882 + pop %fs
22883 #else
22884 movl per_cpu__xen_vcpu, %eax
22885 #endif
22886 diff -urNp linux-2.6.32.42/arch/x86/xen/xen-head.S linux-2.6.32.42/arch/x86/xen/xen-head.S
22887 --- linux-2.6.32.42/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
22888 +++ linux-2.6.32.42/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
22889 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
22890 #ifdef CONFIG_X86_32
22891 mov %esi,xen_start_info
22892 mov $init_thread_union+THREAD_SIZE,%esp
22893 +#ifdef CONFIG_SMP
22894 + movl $cpu_gdt_table,%edi
22895 + movl $__per_cpu_load,%eax
22896 + movw %ax,__KERNEL_PERCPU + 2(%edi)
22897 + rorl $16,%eax
22898 + movb %al,__KERNEL_PERCPU + 4(%edi)
22899 + movb %ah,__KERNEL_PERCPU + 7(%edi)
22900 + movl $__per_cpu_end - 1,%eax
22901 + subl $__per_cpu_start,%eax
22902 + movw %ax,__KERNEL_PERCPU + 0(%edi)
22903 +#endif
22904 #else
22905 mov %rsi,xen_start_info
22906 mov $init_thread_union+THREAD_SIZE,%rsp
22907 diff -urNp linux-2.6.32.42/arch/x86/xen/xen-ops.h linux-2.6.32.42/arch/x86/xen/xen-ops.h
22908 --- linux-2.6.32.42/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
22909 +++ linux-2.6.32.42/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
22910 @@ -10,8 +10,6 @@
22911 extern const char xen_hypervisor_callback[];
22912 extern const char xen_failsafe_callback[];
22913
22914 -extern void *xen_initial_gdt;
22915 -
22916 struct trap_info;
22917 void xen_copy_trap_info(struct trap_info *traps);
22918
22919 diff -urNp linux-2.6.32.42/block/blk-integrity.c linux-2.6.32.42/block/blk-integrity.c
22920 --- linux-2.6.32.42/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
22921 +++ linux-2.6.32.42/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
22922 @@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
22923 NULL,
22924 };
22925
22926 -static struct sysfs_ops integrity_ops = {
22927 +static const struct sysfs_ops integrity_ops = {
22928 .show = &integrity_attr_show,
22929 .store = &integrity_attr_store,
22930 };
22931 diff -urNp linux-2.6.32.42/block/blk-iopoll.c linux-2.6.32.42/block/blk-iopoll.c
22932 --- linux-2.6.32.42/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
22933 +++ linux-2.6.32.42/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
22934 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
22935 }
22936 EXPORT_SYMBOL(blk_iopoll_complete);
22937
22938 -static void blk_iopoll_softirq(struct softirq_action *h)
22939 +static void blk_iopoll_softirq(void)
22940 {
22941 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
22942 int rearm = 0, budget = blk_iopoll_budget;
22943 diff -urNp linux-2.6.32.42/block/blk-map.c linux-2.6.32.42/block/blk-map.c
22944 --- linux-2.6.32.42/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
22945 +++ linux-2.6.32.42/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
22946 @@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
22947 * direct dma. else, set up kernel bounce buffers
22948 */
22949 uaddr = (unsigned long) ubuf;
22950 - if (blk_rq_aligned(q, ubuf, len) && !map_data)
22951 + if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
22952 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
22953 else
22954 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
22955 @@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
22956 for (i = 0; i < iov_count; i++) {
22957 unsigned long uaddr = (unsigned long)iov[i].iov_base;
22958
22959 + if (!iov[i].iov_len)
22960 + return -EINVAL;
22961 +
22962 if (uaddr & queue_dma_alignment(q)) {
22963 unaligned = 1;
22964 break;
22965 }
22966 - if (!iov[i].iov_len)
22967 - return -EINVAL;
22968 }
22969
22970 if (unaligned || (q->dma_pad_mask & len) || map_data)
22971 @@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
22972 if (!len || !kbuf)
22973 return -EINVAL;
22974
22975 - do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
22976 + do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
22977 if (do_copy)
22978 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
22979 else
22980 diff -urNp linux-2.6.32.42/block/blk-softirq.c linux-2.6.32.42/block/blk-softirq.c
22981 --- linux-2.6.32.42/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
22982 +++ linux-2.6.32.42/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
22983 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
22984 * Softirq action handler - move entries to local list and loop over them
22985 * while passing them to the queue registered handler.
22986 */
22987 -static void blk_done_softirq(struct softirq_action *h)
22988 +static void blk_done_softirq(void)
22989 {
22990 struct list_head *cpu_list, local_list;
22991
22992 diff -urNp linux-2.6.32.42/block/blk-sysfs.c linux-2.6.32.42/block/blk-sysfs.c
22993 --- linux-2.6.32.42/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
22994 +++ linux-2.6.32.42/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
22995 @@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
22996 kmem_cache_free(blk_requestq_cachep, q);
22997 }
22998
22999 -static struct sysfs_ops queue_sysfs_ops = {
23000 +static const struct sysfs_ops queue_sysfs_ops = {
23001 .show = queue_attr_show,
23002 .store = queue_attr_store,
23003 };
23004 diff -urNp linux-2.6.32.42/block/bsg.c linux-2.6.32.42/block/bsg.c
23005 --- linux-2.6.32.42/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
23006 +++ linux-2.6.32.42/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
23007 @@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23008 struct sg_io_v4 *hdr, struct bsg_device *bd,
23009 fmode_t has_write_perm)
23010 {
23011 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23012 + unsigned char *cmdptr;
23013 +
23014 if (hdr->request_len > BLK_MAX_CDB) {
23015 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23016 if (!rq->cmd)
23017 return -ENOMEM;
23018 - }
23019 + cmdptr = rq->cmd;
23020 + } else
23021 + cmdptr = tmpcmd;
23022
23023 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
23024 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
23025 hdr->request_len))
23026 return -EFAULT;
23027
23028 + if (cmdptr != rq->cmd)
23029 + memcpy(rq->cmd, cmdptr, hdr->request_len);
23030 +
23031 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23032 if (blk_verify_command(rq->cmd, has_write_perm))
23033 return -EPERM;
23034 diff -urNp linux-2.6.32.42/block/elevator.c linux-2.6.32.42/block/elevator.c
23035 --- linux-2.6.32.42/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
23036 +++ linux-2.6.32.42/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
23037 @@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
23038 return error;
23039 }
23040
23041 -static struct sysfs_ops elv_sysfs_ops = {
23042 +static const struct sysfs_ops elv_sysfs_ops = {
23043 .show = elv_attr_show,
23044 .store = elv_attr_store,
23045 };
23046 diff -urNp linux-2.6.32.42/block/scsi_ioctl.c linux-2.6.32.42/block/scsi_ioctl.c
23047 --- linux-2.6.32.42/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
23048 +++ linux-2.6.32.42/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
23049 @@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
23050 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23051 struct sg_io_hdr *hdr, fmode_t mode)
23052 {
23053 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23054 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23055 + unsigned char *cmdptr;
23056 +
23057 + if (rq->cmd != rq->__cmd)
23058 + cmdptr = rq->cmd;
23059 + else
23060 + cmdptr = tmpcmd;
23061 +
23062 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23063 return -EFAULT;
23064 +
23065 + if (cmdptr != rq->cmd)
23066 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23067 +
23068 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23069 return -EPERM;
23070
23071 @@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
23072 int err;
23073 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23074 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23075 + unsigned char tmpcmd[sizeof(rq->__cmd)];
23076 + unsigned char *cmdptr;
23077
23078 if (!sic)
23079 return -EINVAL;
23080 @@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
23081 */
23082 err = -EFAULT;
23083 rq->cmd_len = cmdlen;
23084 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
23085 +
23086 + if (rq->cmd != rq->__cmd)
23087 + cmdptr = rq->cmd;
23088 + else
23089 + cmdptr = tmpcmd;
23090 +
23091 + if (copy_from_user(cmdptr, sic->data, cmdlen))
23092 goto error;
23093
23094 + if (rq->cmd != cmdptr)
23095 + memcpy(rq->cmd, cmdptr, cmdlen);
23096 +
23097 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23098 goto error;
23099
23100 diff -urNp linux-2.6.32.42/crypto/serpent.c linux-2.6.32.42/crypto/serpent.c
23101 --- linux-2.6.32.42/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
23102 +++ linux-2.6.32.42/crypto/serpent.c 2011-05-16 21:46:57.000000000 -0400
23103 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
23104 u32 r0,r1,r2,r3,r4;
23105 int i;
23106
23107 + pax_track_stack();
23108 +
23109 /* Copy key, add padding */
23110
23111 for (i = 0; i < keylen; ++i)
23112 diff -urNp linux-2.6.32.42/Documentation/dontdiff linux-2.6.32.42/Documentation/dontdiff
23113 --- linux-2.6.32.42/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
23114 +++ linux-2.6.32.42/Documentation/dontdiff 2011-05-18 20:09:36.000000000 -0400
23115 @@ -1,13 +1,16 @@
23116 *.a
23117 *.aux
23118 *.bin
23119 +*.cis
23120 *.cpio
23121 *.csp
23122 +*.dbg
23123 *.dsp
23124 *.dvi
23125 *.elf
23126 *.eps
23127 *.fw
23128 +*.gcno
23129 *.gen.S
23130 *.gif
23131 *.grep
23132 @@ -38,8 +41,10 @@
23133 *.tab.h
23134 *.tex
23135 *.ver
23136 +*.vim
23137 *.xml
23138 *_MODULES
23139 +*_reg_safe.h
23140 *_vga16.c
23141 *~
23142 *.9
23143 @@ -49,11 +54,16 @@
23144 53c700_d.h
23145 CVS
23146 ChangeSet
23147 +GPATH
23148 +GRTAGS
23149 +GSYMS
23150 +GTAGS
23151 Image
23152 Kerntypes
23153 Module.markers
23154 Module.symvers
23155 PENDING
23156 +PERF*
23157 SCCS
23158 System.map*
23159 TAGS
23160 @@ -76,7 +86,11 @@ btfixupprep
23161 build
23162 bvmlinux
23163 bzImage*
23164 +capability_names.h
23165 +capflags.c
23166 classlist.h*
23167 +clut_vga16.c
23168 +common-cmds.h
23169 comp*.log
23170 compile.h*
23171 conf
23172 @@ -103,13 +117,14 @@ gen_crc32table
23173 gen_init_cpio
23174 genksyms
23175 *_gray256.c
23176 +hash
23177 ihex2fw
23178 ikconfig.h*
23179 initramfs_data.cpio
23180 +initramfs_data.cpio.bz2
23181 initramfs_data.cpio.gz
23182 initramfs_list
23183 kallsyms
23184 -kconfig
23185 keywords.c
23186 ksym.c*
23187 ksym.h*
23188 @@ -133,7 +148,9 @@ mkboot
23189 mkbugboot
23190 mkcpustr
23191 mkdep
23192 +mkpiggy
23193 mkprep
23194 +mkregtable
23195 mktables
23196 mktree
23197 modpost
23198 @@ -149,6 +166,7 @@ patches*
23199 pca200e.bin
23200 pca200e_ecd.bin2
23201 piggy.gz
23202 +piggy.S
23203 piggyback
23204 pnmtologo
23205 ppc_defs.h*
23206 @@ -157,12 +175,15 @@ qconf
23207 raid6altivec*.c
23208 raid6int*.c
23209 raid6tables.c
23210 +regdb.c
23211 relocs
23212 +rlim_names.h
23213 series
23214 setup
23215 setup.bin
23216 setup.elf
23217 sImage
23218 +slabinfo
23219 sm_tbl*
23220 split-include
23221 syscalltab.h
23222 @@ -186,14 +207,20 @@ version.h*
23223 vmlinux
23224 vmlinux-*
23225 vmlinux.aout
23226 +vmlinux.bin.all
23227 +vmlinux.bin.bz2
23228 vmlinux.lds
23229 +vmlinux.relocs
23230 +voffset.h
23231 vsyscall.lds
23232 vsyscall_32.lds
23233 wanxlfw.inc
23234 uImage
23235 unifdef
23236 +utsrelease.h
23237 wakeup.bin
23238 wakeup.elf
23239 wakeup.lds
23240 zImage*
23241 zconf.hash.c
23242 +zoffset.h
23243 diff -urNp linux-2.6.32.42/Documentation/kernel-parameters.txt linux-2.6.32.42/Documentation/kernel-parameters.txt
23244 --- linux-2.6.32.42/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
23245 +++ linux-2.6.32.42/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
23246 @@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
23247 the specified number of seconds. This is to be used if
23248 your oopses keep scrolling off the screen.
23249
23250 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
23251 + virtualization environments that don't cope well with the
23252 + expand down segment used by UDEREF on X86-32 or the frequent
23253 + page table updates on X86-64.
23254 +
23255 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
23256 +
23257 pcbit= [HW,ISDN]
23258
23259 pcd. [PARIDE]
23260 diff -urNp linux-2.6.32.42/drivers/acpi/acpi_pad.c linux-2.6.32.42/drivers/acpi/acpi_pad.c
23261 --- linux-2.6.32.42/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
23262 +++ linux-2.6.32.42/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
23263 @@ -30,7 +30,7 @@
23264 #include <acpi/acpi_bus.h>
23265 #include <acpi/acpi_drivers.h>
23266
23267 -#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
23268 +#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
23269 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
23270 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
23271 static DEFINE_MUTEX(isolated_cpus_lock);
23272 diff -urNp linux-2.6.32.42/drivers/acpi/battery.c linux-2.6.32.42/drivers/acpi/battery.c
23273 --- linux-2.6.32.42/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
23274 +++ linux-2.6.32.42/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
23275 @@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
23276 }
23277
23278 static struct battery_file {
23279 - struct file_operations ops;
23280 + const struct file_operations ops;
23281 mode_t mode;
23282 const char *name;
23283 } acpi_battery_file[] = {
23284 diff -urNp linux-2.6.32.42/drivers/acpi/dock.c linux-2.6.32.42/drivers/acpi/dock.c
23285 --- linux-2.6.32.42/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
23286 +++ linux-2.6.32.42/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
23287 @@ -77,7 +77,7 @@ struct dock_dependent_device {
23288 struct list_head list;
23289 struct list_head hotplug_list;
23290 acpi_handle handle;
23291 - struct acpi_dock_ops *ops;
23292 + const struct acpi_dock_ops *ops;
23293 void *context;
23294 };
23295
23296 @@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
23297 * the dock driver after _DCK is executed.
23298 */
23299 int
23300 -register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
23301 +register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
23302 void *context)
23303 {
23304 struct dock_dependent_device *dd;
23305 diff -urNp linux-2.6.32.42/drivers/acpi/osl.c linux-2.6.32.42/drivers/acpi/osl.c
23306 --- linux-2.6.32.42/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
23307 +++ linux-2.6.32.42/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
23308 @@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
23309 void __iomem *virt_addr;
23310
23311 virt_addr = ioremap(phys_addr, width);
23312 + if (!virt_addr)
23313 + return AE_NO_MEMORY;
23314 if (!value)
23315 value = &dummy;
23316
23317 @@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
23318 void __iomem *virt_addr;
23319
23320 virt_addr = ioremap(phys_addr, width);
23321 + if (!virt_addr)
23322 + return AE_NO_MEMORY;
23323
23324 switch (width) {
23325 case 8:
23326 diff -urNp linux-2.6.32.42/drivers/acpi/power_meter.c linux-2.6.32.42/drivers/acpi/power_meter.c
23327 --- linux-2.6.32.42/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
23328 +++ linux-2.6.32.42/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
23329 @@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
23330 return res;
23331
23332 temp /= 1000;
23333 - if (temp < 0)
23334 - return -EINVAL;
23335
23336 mutex_lock(&resource->lock);
23337 resource->trip[attr->index - 7] = temp;
23338 diff -urNp linux-2.6.32.42/drivers/acpi/proc.c linux-2.6.32.42/drivers/acpi/proc.c
23339 --- linux-2.6.32.42/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
23340 +++ linux-2.6.32.42/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
23341 @@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
23342 size_t count, loff_t * ppos)
23343 {
23344 struct list_head *node, *next;
23345 - char strbuf[5];
23346 - char str[5] = "";
23347 - unsigned int len = count;
23348 + char strbuf[5] = {0};
23349 struct acpi_device *found_dev = NULL;
23350
23351 - if (len > 4)
23352 - len = 4;
23353 - if (len < 0)
23354 - return -EFAULT;
23355 + if (count > 4)
23356 + count = 4;
23357
23358 - if (copy_from_user(strbuf, buffer, len))
23359 + if (copy_from_user(strbuf, buffer, count))
23360 return -EFAULT;
23361 - strbuf[len] = '\0';
23362 - sscanf(strbuf, "%s", str);
23363 + strbuf[count] = '\0';
23364
23365 mutex_lock(&acpi_device_lock);
23366 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
23367 @@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
23368 if (!dev->wakeup.flags.valid)
23369 continue;
23370
23371 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
23372 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
23373 dev->wakeup.state.enabled =
23374 dev->wakeup.state.enabled ? 0 : 1;
23375 found_dev = dev;
23376 diff -urNp linux-2.6.32.42/drivers/acpi/processor_core.c linux-2.6.32.42/drivers/acpi/processor_core.c
23377 --- linux-2.6.32.42/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
23378 +++ linux-2.6.32.42/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
23379 @@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
23380 return 0;
23381 }
23382
23383 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
23384 + BUG_ON(pr->id >= nr_cpu_ids);
23385
23386 /*
23387 * Buggy BIOS check
23388 diff -urNp linux-2.6.32.42/drivers/acpi/sbshc.c linux-2.6.32.42/drivers/acpi/sbshc.c
23389 --- linux-2.6.32.42/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
23390 +++ linux-2.6.32.42/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
23391 @@ -17,7 +17,7 @@
23392
23393 #define PREFIX "ACPI: "
23394
23395 -#define ACPI_SMB_HC_CLASS "smbus_host_controller"
23396 +#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
23397 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
23398
23399 struct acpi_smb_hc {
23400 diff -urNp linux-2.6.32.42/drivers/acpi/sleep.c linux-2.6.32.42/drivers/acpi/sleep.c
23401 --- linux-2.6.32.42/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
23402 +++ linux-2.6.32.42/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
23403 @@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
23404 }
23405 }
23406
23407 -static struct platform_suspend_ops acpi_suspend_ops = {
23408 +static const struct platform_suspend_ops acpi_suspend_ops = {
23409 .valid = acpi_suspend_state_valid,
23410 .begin = acpi_suspend_begin,
23411 .prepare_late = acpi_pm_prepare,
23412 @@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
23413 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
23414 * been requested.
23415 */
23416 -static struct platform_suspend_ops acpi_suspend_ops_old = {
23417 +static const struct platform_suspend_ops acpi_suspend_ops_old = {
23418 .valid = acpi_suspend_state_valid,
23419 .begin = acpi_suspend_begin_old,
23420 .prepare_late = acpi_pm_disable_gpes,
23421 @@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
23422 acpi_enable_all_runtime_gpes();
23423 }
23424
23425 -static struct platform_hibernation_ops acpi_hibernation_ops = {
23426 +static const struct platform_hibernation_ops acpi_hibernation_ops = {
23427 .begin = acpi_hibernation_begin,
23428 .end = acpi_pm_end,
23429 .pre_snapshot = acpi_hibernation_pre_snapshot,
23430 @@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
23431 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
23432 * been requested.
23433 */
23434 -static struct platform_hibernation_ops acpi_hibernation_ops_old = {
23435 +static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
23436 .begin = acpi_hibernation_begin_old,
23437 .end = acpi_pm_end,
23438 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
23439 diff -urNp linux-2.6.32.42/drivers/acpi/video.c linux-2.6.32.42/drivers/acpi/video.c
23440 --- linux-2.6.32.42/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
23441 +++ linux-2.6.32.42/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
23442 @@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
23443 vd->brightness->levels[request_level]);
23444 }
23445
23446 -static struct backlight_ops acpi_backlight_ops = {
23447 +static const struct backlight_ops acpi_backlight_ops = {
23448 .get_brightness = acpi_video_get_brightness,
23449 .update_status = acpi_video_set_brightness,
23450 };
23451 diff -urNp linux-2.6.32.42/drivers/ata/ahci.c linux-2.6.32.42/drivers/ata/ahci.c
23452 --- linux-2.6.32.42/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
23453 +++ linux-2.6.32.42/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
23454 @@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
23455 .sdev_attrs = ahci_sdev_attrs,
23456 };
23457
23458 -static struct ata_port_operations ahci_ops = {
23459 +static const struct ata_port_operations ahci_ops = {
23460 .inherits = &sata_pmp_port_ops,
23461
23462 .qc_defer = sata_pmp_qc_defer_cmd_switch,
23463 @@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
23464 .port_stop = ahci_port_stop,
23465 };
23466
23467 -static struct ata_port_operations ahci_vt8251_ops = {
23468 +static const struct ata_port_operations ahci_vt8251_ops = {
23469 .inherits = &ahci_ops,
23470 .hardreset = ahci_vt8251_hardreset,
23471 };
23472
23473 -static struct ata_port_operations ahci_p5wdh_ops = {
23474 +static const struct ata_port_operations ahci_p5wdh_ops = {
23475 .inherits = &ahci_ops,
23476 .hardreset = ahci_p5wdh_hardreset,
23477 };
23478
23479 -static struct ata_port_operations ahci_sb600_ops = {
23480 +static const struct ata_port_operations ahci_sb600_ops = {
23481 .inherits = &ahci_ops,
23482 .softreset = ahci_sb600_softreset,
23483 .pmp_softreset = ahci_sb600_softreset,
23484 diff -urNp linux-2.6.32.42/drivers/ata/ata_generic.c linux-2.6.32.42/drivers/ata/ata_generic.c
23485 --- linux-2.6.32.42/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
23486 +++ linux-2.6.32.42/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
23487 @@ -104,7 +104,7 @@ static struct scsi_host_template generic
23488 ATA_BMDMA_SHT(DRV_NAME),
23489 };
23490
23491 -static struct ata_port_operations generic_port_ops = {
23492 +static const struct ata_port_operations generic_port_ops = {
23493 .inherits = &ata_bmdma_port_ops,
23494 .cable_detect = ata_cable_unknown,
23495 .set_mode = generic_set_mode,
23496 diff -urNp linux-2.6.32.42/drivers/ata/ata_piix.c linux-2.6.32.42/drivers/ata/ata_piix.c
23497 --- linux-2.6.32.42/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
23498 +++ linux-2.6.32.42/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
23499 @@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
23500 ATA_BMDMA_SHT(DRV_NAME),
23501 };
23502
23503 -static struct ata_port_operations piix_pata_ops = {
23504 +static const struct ata_port_operations piix_pata_ops = {
23505 .inherits = &ata_bmdma32_port_ops,
23506 .cable_detect = ata_cable_40wire,
23507 .set_piomode = piix_set_piomode,
23508 @@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
23509 .prereset = piix_pata_prereset,
23510 };
23511
23512 -static struct ata_port_operations piix_vmw_ops = {
23513 +static const struct ata_port_operations piix_vmw_ops = {
23514 .inherits = &piix_pata_ops,
23515 .bmdma_status = piix_vmw_bmdma_status,
23516 };
23517
23518 -static struct ata_port_operations ich_pata_ops = {
23519 +static const struct ata_port_operations ich_pata_ops = {
23520 .inherits = &piix_pata_ops,
23521 .cable_detect = ich_pata_cable_detect,
23522 .set_dmamode = ich_set_dmamode,
23523 };
23524
23525 -static struct ata_port_operations piix_sata_ops = {
23526 +static const struct ata_port_operations piix_sata_ops = {
23527 .inherits = &ata_bmdma_port_ops,
23528 };
23529
23530 -static struct ata_port_operations piix_sidpr_sata_ops = {
23531 +static const struct ata_port_operations piix_sidpr_sata_ops = {
23532 .inherits = &piix_sata_ops,
23533 .hardreset = sata_std_hardreset,
23534 .scr_read = piix_sidpr_scr_read,
23535 diff -urNp linux-2.6.32.42/drivers/ata/libata-acpi.c linux-2.6.32.42/drivers/ata/libata-acpi.c
23536 --- linux-2.6.32.42/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
23537 +++ linux-2.6.32.42/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
23538 @@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
23539 ata_acpi_uevent(dev->link->ap, dev, event);
23540 }
23541
23542 -static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
23543 +static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
23544 .handler = ata_acpi_dev_notify_dock,
23545 .uevent = ata_acpi_dev_uevent,
23546 };
23547
23548 -static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
23549 +static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
23550 .handler = ata_acpi_ap_notify_dock,
23551 .uevent = ata_acpi_ap_uevent,
23552 };
23553 diff -urNp linux-2.6.32.42/drivers/ata/libata-core.c linux-2.6.32.42/drivers/ata/libata-core.c
23554 --- linux-2.6.32.42/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
23555 +++ linux-2.6.32.42/drivers/ata/libata-core.c 2011-04-23 12:56:10.000000000 -0400
23556 @@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
23557 struct ata_port *ap;
23558 unsigned int tag;
23559
23560 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23561 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23562 ap = qc->ap;
23563
23564 qc->flags = 0;
23565 @@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
23566 struct ata_port *ap;
23567 struct ata_link *link;
23568
23569 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23570 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
23571 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
23572 ap = qc->ap;
23573 link = qc->dev->link;
23574 @@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
23575 * LOCKING:
23576 * None.
23577 */
23578 -static void ata_finalize_port_ops(struct ata_port_operations *ops)
23579 +static void ata_finalize_port_ops(const struct ata_port_operations *ops)
23580 {
23581 static DEFINE_SPINLOCK(lock);
23582 const struct ata_port_operations *cur;
23583 @@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
23584 return;
23585
23586 spin_lock(&lock);
23587 + pax_open_kernel();
23588
23589 for (cur = ops->inherits; cur; cur = cur->inherits) {
23590 void **inherit = (void **)cur;
23591 @@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
23592 if (IS_ERR(*pp))
23593 *pp = NULL;
23594
23595 - ops->inherits = NULL;
23596 + ((struct ata_port_operations *)ops)->inherits = NULL;
23597
23598 + pax_close_kernel();
23599 spin_unlock(&lock);
23600 }
23601
23602 @@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
23603 */
23604 /* KILLME - the only user left is ipr */
23605 void ata_host_init(struct ata_host *host, struct device *dev,
23606 - unsigned long flags, struct ata_port_operations *ops)
23607 + unsigned long flags, const struct ata_port_operations *ops)
23608 {
23609 spin_lock_init(&host->lock);
23610 host->dev = dev;
23611 @@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
23612 /* truly dummy */
23613 }
23614
23615 -struct ata_port_operations ata_dummy_port_ops = {
23616 +const struct ata_port_operations ata_dummy_port_ops = {
23617 .qc_prep = ata_noop_qc_prep,
23618 .qc_issue = ata_dummy_qc_issue,
23619 .error_handler = ata_dummy_error_handler,
23620 diff -urNp linux-2.6.32.42/drivers/ata/libata-eh.c linux-2.6.32.42/drivers/ata/libata-eh.c
23621 --- linux-2.6.32.42/drivers/ata/libata-eh.c 2011-03-27 14:31:47.000000000 -0400
23622 +++ linux-2.6.32.42/drivers/ata/libata-eh.c 2011-05-16 21:46:57.000000000 -0400
23623 @@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
23624 {
23625 struct ata_link *link;
23626
23627 + pax_track_stack();
23628 +
23629 ata_for_each_link(link, ap, HOST_FIRST)
23630 ata_eh_link_report(link);
23631 }
23632 @@ -3590,7 +3592,7 @@ void ata_do_eh(struct ata_port *ap, ata_
23633 */
23634 void ata_std_error_handler(struct ata_port *ap)
23635 {
23636 - struct ata_port_operations *ops = ap->ops;
23637 + const struct ata_port_operations *ops = ap->ops;
23638 ata_reset_fn_t hardreset = ops->hardreset;
23639
23640 /* ignore built-in hardreset if SCR access is not available */
23641 diff -urNp linux-2.6.32.42/drivers/ata/libata-pmp.c linux-2.6.32.42/drivers/ata/libata-pmp.c
23642 --- linux-2.6.32.42/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
23643 +++ linux-2.6.32.42/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
23644 @@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
23645 */
23646 static int sata_pmp_eh_recover(struct ata_port *ap)
23647 {
23648 - struct ata_port_operations *ops = ap->ops;
23649 + const struct ata_port_operations *ops = ap->ops;
23650 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
23651 struct ata_link *pmp_link = &ap->link;
23652 struct ata_device *pmp_dev = pmp_link->device;
23653 diff -urNp linux-2.6.32.42/drivers/ata/pata_acpi.c linux-2.6.32.42/drivers/ata/pata_acpi.c
23654 --- linux-2.6.32.42/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
23655 +++ linux-2.6.32.42/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
23656 @@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
23657 ATA_BMDMA_SHT(DRV_NAME),
23658 };
23659
23660 -static struct ata_port_operations pacpi_ops = {
23661 +static const struct ata_port_operations pacpi_ops = {
23662 .inherits = &ata_bmdma_port_ops,
23663 .qc_issue = pacpi_qc_issue,
23664 .cable_detect = pacpi_cable_detect,
23665 diff -urNp linux-2.6.32.42/drivers/ata/pata_ali.c linux-2.6.32.42/drivers/ata/pata_ali.c
23666 --- linux-2.6.32.42/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
23667 +++ linux-2.6.32.42/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
23668 @@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
23669 * Port operations for PIO only ALi
23670 */
23671
23672 -static struct ata_port_operations ali_early_port_ops = {
23673 +static const struct ata_port_operations ali_early_port_ops = {
23674 .inherits = &ata_sff_port_ops,
23675 .cable_detect = ata_cable_40wire,
23676 .set_piomode = ali_set_piomode,
23677 @@ -382,7 +382,7 @@ static const struct ata_port_operations
23678 * Port operations for DMA capable ALi without cable
23679 * detect
23680 */
23681 -static struct ata_port_operations ali_20_port_ops = {
23682 +static const struct ata_port_operations ali_20_port_ops = {
23683 .inherits = &ali_dma_base_ops,
23684 .cable_detect = ata_cable_40wire,
23685 .mode_filter = ali_20_filter,
23686 @@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
23687 /*
23688 * Port operations for DMA capable ALi with cable detect
23689 */
23690 -static struct ata_port_operations ali_c2_port_ops = {
23691 +static const struct ata_port_operations ali_c2_port_ops = {
23692 .inherits = &ali_dma_base_ops,
23693 .check_atapi_dma = ali_check_atapi_dma,
23694 .cable_detect = ali_c2_cable_detect,
23695 @@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
23696 /*
23697 * Port operations for DMA capable ALi with cable detect
23698 */
23699 -static struct ata_port_operations ali_c4_port_ops = {
23700 +static const struct ata_port_operations ali_c4_port_ops = {
23701 .inherits = &ali_dma_base_ops,
23702 .check_atapi_dma = ali_check_atapi_dma,
23703 .cable_detect = ali_c2_cable_detect,
23704 @@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
23705 /*
23706 * Port operations for DMA capable ALi with cable detect and LBA48
23707 */
23708 -static struct ata_port_operations ali_c5_port_ops = {
23709 +static const struct ata_port_operations ali_c5_port_ops = {
23710 .inherits = &ali_dma_base_ops,
23711 .check_atapi_dma = ali_check_atapi_dma,
23712 .dev_config = ali_warn_atapi_dma,
23713 diff -urNp linux-2.6.32.42/drivers/ata/pata_amd.c linux-2.6.32.42/drivers/ata/pata_amd.c
23714 --- linux-2.6.32.42/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
23715 +++ linux-2.6.32.42/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
23716 @@ -397,28 +397,28 @@ static const struct ata_port_operations
23717 .prereset = amd_pre_reset,
23718 };
23719
23720 -static struct ata_port_operations amd33_port_ops = {
23721 +static const struct ata_port_operations amd33_port_ops = {
23722 .inherits = &amd_base_port_ops,
23723 .cable_detect = ata_cable_40wire,
23724 .set_piomode = amd33_set_piomode,
23725 .set_dmamode = amd33_set_dmamode,
23726 };
23727
23728 -static struct ata_port_operations amd66_port_ops = {
23729 +static const struct ata_port_operations amd66_port_ops = {
23730 .inherits = &amd_base_port_ops,
23731 .cable_detect = ata_cable_unknown,
23732 .set_piomode = amd66_set_piomode,
23733 .set_dmamode = amd66_set_dmamode,
23734 };
23735
23736 -static struct ata_port_operations amd100_port_ops = {
23737 +static const struct ata_port_operations amd100_port_ops = {
23738 .inherits = &amd_base_port_ops,
23739 .cable_detect = ata_cable_unknown,
23740 .set_piomode = amd100_set_piomode,
23741 .set_dmamode = amd100_set_dmamode,
23742 };
23743
23744 -static struct ata_port_operations amd133_port_ops = {
23745 +static const struct ata_port_operations amd133_port_ops = {
23746 .inherits = &amd_base_port_ops,
23747 .cable_detect = amd_cable_detect,
23748 .set_piomode = amd133_set_piomode,
23749 @@ -433,13 +433,13 @@ static const struct ata_port_operations
23750 .host_stop = nv_host_stop,
23751 };
23752
23753 -static struct ata_port_operations nv100_port_ops = {
23754 +static const struct ata_port_operations nv100_port_ops = {
23755 .inherits = &nv_base_port_ops,
23756 .set_piomode = nv100_set_piomode,
23757 .set_dmamode = nv100_set_dmamode,
23758 };
23759
23760 -static struct ata_port_operations nv133_port_ops = {
23761 +static const struct ata_port_operations nv133_port_ops = {
23762 .inherits = &nv_base_port_ops,
23763 .set_piomode = nv133_set_piomode,
23764 .set_dmamode = nv133_set_dmamode,
23765 diff -urNp linux-2.6.32.42/drivers/ata/pata_artop.c linux-2.6.32.42/drivers/ata/pata_artop.c
23766 --- linux-2.6.32.42/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
23767 +++ linux-2.6.32.42/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
23768 @@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
23769 ATA_BMDMA_SHT(DRV_NAME),
23770 };
23771
23772 -static struct ata_port_operations artop6210_ops = {
23773 +static const struct ata_port_operations artop6210_ops = {
23774 .inherits = &ata_bmdma_port_ops,
23775 .cable_detect = ata_cable_40wire,
23776 .set_piomode = artop6210_set_piomode,
23777 @@ -320,7 +320,7 @@ static struct ata_port_operations artop6
23778 .qc_defer = artop6210_qc_defer,
23779 };
23780
23781 -static struct ata_port_operations artop6260_ops = {
23782 +static const struct ata_port_operations artop6260_ops = {
23783 .inherits = &ata_bmdma_port_ops,
23784 .cable_detect = artop6260_cable_detect,
23785 .set_piomode = artop6260_set_piomode,
23786 diff -urNp linux-2.6.32.42/drivers/ata/pata_at32.c linux-2.6.32.42/drivers/ata/pata_at32.c
23787 --- linux-2.6.32.42/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
23788 +++ linux-2.6.32.42/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
23789 @@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
23790 ATA_PIO_SHT(DRV_NAME),
23791 };
23792
23793 -static struct ata_port_operations at32_port_ops = {
23794 +static const struct ata_port_operations at32_port_ops = {
23795 .inherits = &ata_sff_port_ops,
23796 .cable_detect = ata_cable_40wire,
23797 .set_piomode = pata_at32_set_piomode,
23798 diff -urNp linux-2.6.32.42/drivers/ata/pata_at91.c linux-2.6.32.42/drivers/ata/pata_at91.c
23799 --- linux-2.6.32.42/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
23800 +++ linux-2.6.32.42/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
23801 @@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
23802 ATA_PIO_SHT(DRV_NAME),
23803 };
23804
23805 -static struct ata_port_operations pata_at91_port_ops = {
23806 +static const struct ata_port_operations pata_at91_port_ops = {
23807 .inherits = &ata_sff_port_ops,
23808
23809 .sff_data_xfer = pata_at91_data_xfer_noirq,
23810 diff -urNp linux-2.6.32.42/drivers/ata/pata_atiixp.c linux-2.6.32.42/drivers/ata/pata_atiixp.c
23811 --- linux-2.6.32.42/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
23812 +++ linux-2.6.32.42/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
23813 @@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
23814 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23815 };
23816
23817 -static struct ata_port_operations atiixp_port_ops = {
23818 +static const struct ata_port_operations atiixp_port_ops = {
23819 .inherits = &ata_bmdma_port_ops,
23820
23821 .qc_prep = ata_sff_dumb_qc_prep,
23822 diff -urNp linux-2.6.32.42/drivers/ata/pata_atp867x.c linux-2.6.32.42/drivers/ata/pata_atp867x.c
23823 --- linux-2.6.32.42/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
23824 +++ linux-2.6.32.42/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
23825 @@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
23826 ATA_BMDMA_SHT(DRV_NAME),
23827 };
23828
23829 -static struct ata_port_operations atp867x_ops = {
23830 +static const struct ata_port_operations atp867x_ops = {
23831 .inherits = &ata_bmdma_port_ops,
23832 .cable_detect = atp867x_cable_detect,
23833 .set_piomode = atp867x_set_piomode,
23834 diff -urNp linux-2.6.32.42/drivers/ata/pata_bf54x.c linux-2.6.32.42/drivers/ata/pata_bf54x.c
23835 --- linux-2.6.32.42/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
23836 +++ linux-2.6.32.42/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
23837 @@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
23838 .dma_boundary = ATA_DMA_BOUNDARY,
23839 };
23840
23841 -static struct ata_port_operations bfin_pata_ops = {
23842 +static const struct ata_port_operations bfin_pata_ops = {
23843 .inherits = &ata_sff_port_ops,
23844
23845 .set_piomode = bfin_set_piomode,
23846 diff -urNp linux-2.6.32.42/drivers/ata/pata_cmd640.c linux-2.6.32.42/drivers/ata/pata_cmd640.c
23847 --- linux-2.6.32.42/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
23848 +++ linux-2.6.32.42/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
23849 @@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
23850 ATA_BMDMA_SHT(DRV_NAME),
23851 };
23852
23853 -static struct ata_port_operations cmd640_port_ops = {
23854 +static const struct ata_port_operations cmd640_port_ops = {
23855 .inherits = &ata_bmdma_port_ops,
23856 /* In theory xfer_noirq is not needed once we kill the prefetcher */
23857 .sff_data_xfer = ata_sff_data_xfer_noirq,
23858 diff -urNp linux-2.6.32.42/drivers/ata/pata_cmd64x.c linux-2.6.32.42/drivers/ata/pata_cmd64x.c
23859 --- linux-2.6.32.42/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
23860 +++ linux-2.6.32.42/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
23861 @@ -271,18 +271,18 @@ static const struct ata_port_operations
23862 .set_dmamode = cmd64x_set_dmamode,
23863 };
23864
23865 -static struct ata_port_operations cmd64x_port_ops = {
23866 +static const struct ata_port_operations cmd64x_port_ops = {
23867 .inherits = &cmd64x_base_ops,
23868 .cable_detect = ata_cable_40wire,
23869 };
23870
23871 -static struct ata_port_operations cmd646r1_port_ops = {
23872 +static const struct ata_port_operations cmd646r1_port_ops = {
23873 .inherits = &cmd64x_base_ops,
23874 .bmdma_stop = cmd646r1_bmdma_stop,
23875 .cable_detect = ata_cable_40wire,
23876 };
23877
23878 -static struct ata_port_operations cmd648_port_ops = {
23879 +static const struct ata_port_operations cmd648_port_ops = {
23880 .inherits = &cmd64x_base_ops,
23881 .bmdma_stop = cmd648_bmdma_stop,
23882 .cable_detect = cmd648_cable_detect,
23883 diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5520.c linux-2.6.32.42/drivers/ata/pata_cs5520.c
23884 --- linux-2.6.32.42/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
23885 +++ linux-2.6.32.42/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
23886 @@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
23887 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23888 };
23889
23890 -static struct ata_port_operations cs5520_port_ops = {
23891 +static const struct ata_port_operations cs5520_port_ops = {
23892 .inherits = &ata_bmdma_port_ops,
23893 .qc_prep = ata_sff_dumb_qc_prep,
23894 .cable_detect = ata_cable_40wire,
23895 diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5530.c linux-2.6.32.42/drivers/ata/pata_cs5530.c
23896 --- linux-2.6.32.42/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
23897 +++ linux-2.6.32.42/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
23898 @@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
23899 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
23900 };
23901
23902 -static struct ata_port_operations cs5530_port_ops = {
23903 +static const struct ata_port_operations cs5530_port_ops = {
23904 .inherits = &ata_bmdma_port_ops,
23905
23906 .qc_prep = ata_sff_dumb_qc_prep,
23907 diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5535.c linux-2.6.32.42/drivers/ata/pata_cs5535.c
23908 --- linux-2.6.32.42/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
23909 +++ linux-2.6.32.42/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
23910 @@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
23911 ATA_BMDMA_SHT(DRV_NAME),
23912 };
23913
23914 -static struct ata_port_operations cs5535_port_ops = {
23915 +static const struct ata_port_operations cs5535_port_ops = {
23916 .inherits = &ata_bmdma_port_ops,
23917 .cable_detect = cs5535_cable_detect,
23918 .set_piomode = cs5535_set_piomode,
23919 diff -urNp linux-2.6.32.42/drivers/ata/pata_cs5536.c linux-2.6.32.42/drivers/ata/pata_cs5536.c
23920 --- linux-2.6.32.42/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
23921 +++ linux-2.6.32.42/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
23922 @@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
23923 ATA_BMDMA_SHT(DRV_NAME),
23924 };
23925
23926 -static struct ata_port_operations cs5536_port_ops = {
23927 +static const struct ata_port_operations cs5536_port_ops = {
23928 .inherits = &ata_bmdma_port_ops,
23929 .cable_detect = cs5536_cable_detect,
23930 .set_piomode = cs5536_set_piomode,
23931 diff -urNp linux-2.6.32.42/drivers/ata/pata_cypress.c linux-2.6.32.42/drivers/ata/pata_cypress.c
23932 --- linux-2.6.32.42/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
23933 +++ linux-2.6.32.42/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
23934 @@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
23935 ATA_BMDMA_SHT(DRV_NAME),
23936 };
23937
23938 -static struct ata_port_operations cy82c693_port_ops = {
23939 +static const struct ata_port_operations cy82c693_port_ops = {
23940 .inherits = &ata_bmdma_port_ops,
23941 .cable_detect = ata_cable_40wire,
23942 .set_piomode = cy82c693_set_piomode,
23943 diff -urNp linux-2.6.32.42/drivers/ata/pata_efar.c linux-2.6.32.42/drivers/ata/pata_efar.c
23944 --- linux-2.6.32.42/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
23945 +++ linux-2.6.32.42/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
23946 @@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
23947 ATA_BMDMA_SHT(DRV_NAME),
23948 };
23949
23950 -static struct ata_port_operations efar_ops = {
23951 +static const struct ata_port_operations efar_ops = {
23952 .inherits = &ata_bmdma_port_ops,
23953 .cable_detect = efar_cable_detect,
23954 .set_piomode = efar_set_piomode,
23955 diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt366.c linux-2.6.32.42/drivers/ata/pata_hpt366.c
23956 --- linux-2.6.32.42/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
23957 +++ linux-2.6.32.42/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
23958 @@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
23959 * Configuration for HPT366/68
23960 */
23961
23962 -static struct ata_port_operations hpt366_port_ops = {
23963 +static const struct ata_port_operations hpt366_port_ops = {
23964 .inherits = &ata_bmdma_port_ops,
23965 .cable_detect = hpt36x_cable_detect,
23966 .mode_filter = hpt366_filter,
23967 diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt37x.c linux-2.6.32.42/drivers/ata/pata_hpt37x.c
23968 --- linux-2.6.32.42/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
23969 +++ linux-2.6.32.42/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
23970 @@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
23971 * Configuration for HPT370
23972 */
23973
23974 -static struct ata_port_operations hpt370_port_ops = {
23975 +static const struct ata_port_operations hpt370_port_ops = {
23976 .inherits = &ata_bmdma_port_ops,
23977
23978 .bmdma_stop = hpt370_bmdma_stop,
23979 @@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
23980 * Configuration for HPT370A. Close to 370 but less filters
23981 */
23982
23983 -static struct ata_port_operations hpt370a_port_ops = {
23984 +static const struct ata_port_operations hpt370a_port_ops = {
23985 .inherits = &hpt370_port_ops,
23986 .mode_filter = hpt370a_filter,
23987 };
23988 @@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
23989 * and DMA mode setting functionality.
23990 */
23991
23992 -static struct ata_port_operations hpt372_port_ops = {
23993 +static const struct ata_port_operations hpt372_port_ops = {
23994 .inherits = &ata_bmdma_port_ops,
23995
23996 .bmdma_stop = hpt37x_bmdma_stop,
23997 @@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
23998 * but we have a different cable detection procedure for function 1.
23999 */
24000
24001 -static struct ata_port_operations hpt374_fn1_port_ops = {
24002 +static const struct ata_port_operations hpt374_fn1_port_ops = {
24003 .inherits = &hpt372_port_ops,
24004 .prereset = hpt374_fn1_pre_reset,
24005 };
24006 diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c
24007 --- linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
24008 +++ linux-2.6.32.42/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
24009 @@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
24010 * Configuration for HPT3x2n.
24011 */
24012
24013 -static struct ata_port_operations hpt3x2n_port_ops = {
24014 +static const struct ata_port_operations hpt3x2n_port_ops = {
24015 .inherits = &ata_bmdma_port_ops,
24016
24017 .bmdma_stop = hpt3x2n_bmdma_stop,
24018 diff -urNp linux-2.6.32.42/drivers/ata/pata_hpt3x3.c linux-2.6.32.42/drivers/ata/pata_hpt3x3.c
24019 --- linux-2.6.32.42/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
24020 +++ linux-2.6.32.42/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
24021 @@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
24022 ATA_BMDMA_SHT(DRV_NAME),
24023 };
24024
24025 -static struct ata_port_operations hpt3x3_port_ops = {
24026 +static const struct ata_port_operations hpt3x3_port_ops = {
24027 .inherits = &ata_bmdma_port_ops,
24028 .cable_detect = ata_cable_40wire,
24029 .set_piomode = hpt3x3_set_piomode,
24030 diff -urNp linux-2.6.32.42/drivers/ata/pata_icside.c linux-2.6.32.42/drivers/ata/pata_icside.c
24031 --- linux-2.6.32.42/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
24032 +++ linux-2.6.32.42/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
24033 @@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
24034 }
24035 }
24036
24037 -static struct ata_port_operations pata_icside_port_ops = {
24038 +static const struct ata_port_operations pata_icside_port_ops = {
24039 .inherits = &ata_sff_port_ops,
24040 /* no need to build any PRD tables for DMA */
24041 .qc_prep = ata_noop_qc_prep,
24042 diff -urNp linux-2.6.32.42/drivers/ata/pata_isapnp.c linux-2.6.32.42/drivers/ata/pata_isapnp.c
24043 --- linux-2.6.32.42/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
24044 +++ linux-2.6.32.42/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
24045 @@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
24046 ATA_PIO_SHT(DRV_NAME),
24047 };
24048
24049 -static struct ata_port_operations isapnp_port_ops = {
24050 +static const struct ata_port_operations isapnp_port_ops = {
24051 .inherits = &ata_sff_port_ops,
24052 .cable_detect = ata_cable_40wire,
24053 };
24054
24055 -static struct ata_port_operations isapnp_noalt_port_ops = {
24056 +static const struct ata_port_operations isapnp_noalt_port_ops = {
24057 .inherits = &ata_sff_port_ops,
24058 .cable_detect = ata_cable_40wire,
24059 /* No altstatus so we don't want to use the lost interrupt poll */
24060 diff -urNp linux-2.6.32.42/drivers/ata/pata_it8213.c linux-2.6.32.42/drivers/ata/pata_it8213.c
24061 --- linux-2.6.32.42/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
24062 +++ linux-2.6.32.42/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
24063 @@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
24064 };
24065
24066
24067 -static struct ata_port_operations it8213_ops = {
24068 +static const struct ata_port_operations it8213_ops = {
24069 .inherits = &ata_bmdma_port_ops,
24070 .cable_detect = it8213_cable_detect,
24071 .set_piomode = it8213_set_piomode,
24072 diff -urNp linux-2.6.32.42/drivers/ata/pata_it821x.c linux-2.6.32.42/drivers/ata/pata_it821x.c
24073 --- linux-2.6.32.42/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
24074 +++ linux-2.6.32.42/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
24075 @@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
24076 ATA_BMDMA_SHT(DRV_NAME),
24077 };
24078
24079 -static struct ata_port_operations it821x_smart_port_ops = {
24080 +static const struct ata_port_operations it821x_smart_port_ops = {
24081 .inherits = &ata_bmdma_port_ops,
24082
24083 .check_atapi_dma= it821x_check_atapi_dma,
24084 @@ -814,7 +814,7 @@ static struct ata_port_operations it821x
24085 .port_start = it821x_port_start,
24086 };
24087
24088 -static struct ata_port_operations it821x_passthru_port_ops = {
24089 +static const struct ata_port_operations it821x_passthru_port_ops = {
24090 .inherits = &ata_bmdma_port_ops,
24091
24092 .check_atapi_dma= it821x_check_atapi_dma,
24093 @@ -830,7 +830,7 @@ static struct ata_port_operations it821x
24094 .port_start = it821x_port_start,
24095 };
24096
24097 -static struct ata_port_operations it821x_rdc_port_ops = {
24098 +static const struct ata_port_operations it821x_rdc_port_ops = {
24099 .inherits = &ata_bmdma_port_ops,
24100
24101 .check_atapi_dma= it821x_check_atapi_dma,
24102 diff -urNp linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c
24103 --- linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
24104 +++ linux-2.6.32.42/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
24105 @@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
24106 ATA_PIO_SHT(DRV_NAME),
24107 };
24108
24109 -static struct ata_port_operations ixp4xx_port_ops = {
24110 +static const struct ata_port_operations ixp4xx_port_ops = {
24111 .inherits = &ata_sff_port_ops,
24112 .sff_data_xfer = ixp4xx_mmio_data_xfer,
24113 .cable_detect = ata_cable_40wire,
24114 diff -urNp linux-2.6.32.42/drivers/ata/pata_jmicron.c linux-2.6.32.42/drivers/ata/pata_jmicron.c
24115 --- linux-2.6.32.42/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
24116 +++ linux-2.6.32.42/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
24117 @@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
24118 ATA_BMDMA_SHT(DRV_NAME),
24119 };
24120
24121 -static struct ata_port_operations jmicron_ops = {
24122 +static const struct ata_port_operations jmicron_ops = {
24123 .inherits = &ata_bmdma_port_ops,
24124 .prereset = jmicron_pre_reset,
24125 };
24126 diff -urNp linux-2.6.32.42/drivers/ata/pata_legacy.c linux-2.6.32.42/drivers/ata/pata_legacy.c
24127 --- linux-2.6.32.42/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
24128 +++ linux-2.6.32.42/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
24129 @@ -106,7 +106,7 @@ struct legacy_probe {
24130
24131 struct legacy_controller {
24132 const char *name;
24133 - struct ata_port_operations *ops;
24134 + const struct ata_port_operations *ops;
24135 unsigned int pio_mask;
24136 unsigned int flags;
24137 unsigned int pflags;
24138 @@ -223,12 +223,12 @@ static const struct ata_port_operations
24139 * pio_mask as well.
24140 */
24141
24142 -static struct ata_port_operations simple_port_ops = {
24143 +static const struct ata_port_operations simple_port_ops = {
24144 .inherits = &legacy_base_port_ops,
24145 .sff_data_xfer = ata_sff_data_xfer_noirq,
24146 };
24147
24148 -static struct ata_port_operations legacy_port_ops = {
24149 +static const struct ata_port_operations legacy_port_ops = {
24150 .inherits = &legacy_base_port_ops,
24151 .sff_data_xfer = ata_sff_data_xfer_noirq,
24152 .set_mode = legacy_set_mode,
24153 @@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
24154 return buflen;
24155 }
24156
24157 -static struct ata_port_operations pdc20230_port_ops = {
24158 +static const struct ata_port_operations pdc20230_port_ops = {
24159 .inherits = &legacy_base_port_ops,
24160 .set_piomode = pdc20230_set_piomode,
24161 .sff_data_xfer = pdc_data_xfer_vlb,
24162 @@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
24163 ioread8(ap->ioaddr.status_addr);
24164 }
24165
24166 -static struct ata_port_operations ht6560a_port_ops = {
24167 +static const struct ata_port_operations ht6560a_port_ops = {
24168 .inherits = &legacy_base_port_ops,
24169 .set_piomode = ht6560a_set_piomode,
24170 };
24171 @@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
24172 ioread8(ap->ioaddr.status_addr);
24173 }
24174
24175 -static struct ata_port_operations ht6560b_port_ops = {
24176 +static const struct ata_port_operations ht6560b_port_ops = {
24177 .inherits = &legacy_base_port_ops,
24178 .set_piomode = ht6560b_set_piomode,
24179 };
24180 @@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
24181 }
24182
24183
24184 -static struct ata_port_operations opti82c611a_port_ops = {
24185 +static const struct ata_port_operations opti82c611a_port_ops = {
24186 .inherits = &legacy_base_port_ops,
24187 .set_piomode = opti82c611a_set_piomode,
24188 };
24189 @@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
24190 return ata_sff_qc_issue(qc);
24191 }
24192
24193 -static struct ata_port_operations opti82c46x_port_ops = {
24194 +static const struct ata_port_operations opti82c46x_port_ops = {
24195 .inherits = &legacy_base_port_ops,
24196 .set_piomode = opti82c46x_set_piomode,
24197 .qc_issue = opti82c46x_qc_issue,
24198 @@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
24199 return 0;
24200 }
24201
24202 -static struct ata_port_operations qdi6500_port_ops = {
24203 +static const struct ata_port_operations qdi6500_port_ops = {
24204 .inherits = &legacy_base_port_ops,
24205 .set_piomode = qdi6500_set_piomode,
24206 .qc_issue = qdi_qc_issue,
24207 .sff_data_xfer = vlb32_data_xfer,
24208 };
24209
24210 -static struct ata_port_operations qdi6580_port_ops = {
24211 +static const struct ata_port_operations qdi6580_port_ops = {
24212 .inherits = &legacy_base_port_ops,
24213 .set_piomode = qdi6580_set_piomode,
24214 .sff_data_xfer = vlb32_data_xfer,
24215 };
24216
24217 -static struct ata_port_operations qdi6580dp_port_ops = {
24218 +static const struct ata_port_operations qdi6580dp_port_ops = {
24219 .inherits = &legacy_base_port_ops,
24220 .set_piomode = qdi6580dp_set_piomode,
24221 .sff_data_xfer = vlb32_data_xfer,
24222 @@ -855,7 +855,7 @@ static int winbond_port(struct platform_
24223 return 0;
24224 }
24225
24226 -static struct ata_port_operations winbond_port_ops = {
24227 +static const struct ata_port_operations winbond_port_ops = {
24228 .inherits = &legacy_base_port_ops,
24229 .set_piomode = winbond_set_piomode,
24230 .sff_data_xfer = vlb32_data_xfer,
24231 @@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
24232 int pio_modes = controller->pio_mask;
24233 unsigned long io = probe->port;
24234 u32 mask = (1 << probe->slot);
24235 - struct ata_port_operations *ops = controller->ops;
24236 + const struct ata_port_operations *ops = controller->ops;
24237 struct legacy_data *ld = &legacy_data[probe->slot];
24238 struct ata_host *host = NULL;
24239 struct ata_port *ap;
24240 diff -urNp linux-2.6.32.42/drivers/ata/pata_marvell.c linux-2.6.32.42/drivers/ata/pata_marvell.c
24241 --- linux-2.6.32.42/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
24242 +++ linux-2.6.32.42/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
24243 @@ -100,7 +100,7 @@ static struct scsi_host_template marvell
24244 ATA_BMDMA_SHT(DRV_NAME),
24245 };
24246
24247 -static struct ata_port_operations marvell_ops = {
24248 +static const struct ata_port_operations marvell_ops = {
24249 .inherits = &ata_bmdma_port_ops,
24250 .cable_detect = marvell_cable_detect,
24251 .prereset = marvell_pre_reset,
24252 diff -urNp linux-2.6.32.42/drivers/ata/pata_mpc52xx.c linux-2.6.32.42/drivers/ata/pata_mpc52xx.c
24253 --- linux-2.6.32.42/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
24254 +++ linux-2.6.32.42/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
24255 @@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
24256 ATA_PIO_SHT(DRV_NAME),
24257 };
24258
24259 -static struct ata_port_operations mpc52xx_ata_port_ops = {
24260 +static const struct ata_port_operations mpc52xx_ata_port_ops = {
24261 .inherits = &ata_bmdma_port_ops,
24262 .sff_dev_select = mpc52xx_ata_dev_select,
24263 .set_piomode = mpc52xx_ata_set_piomode,
24264 diff -urNp linux-2.6.32.42/drivers/ata/pata_mpiix.c linux-2.6.32.42/drivers/ata/pata_mpiix.c
24265 --- linux-2.6.32.42/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
24266 +++ linux-2.6.32.42/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
24267 @@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
24268 ATA_PIO_SHT(DRV_NAME),
24269 };
24270
24271 -static struct ata_port_operations mpiix_port_ops = {
24272 +static const struct ata_port_operations mpiix_port_ops = {
24273 .inherits = &ata_sff_port_ops,
24274 .qc_issue = mpiix_qc_issue,
24275 .cable_detect = ata_cable_40wire,
24276 diff -urNp linux-2.6.32.42/drivers/ata/pata_netcell.c linux-2.6.32.42/drivers/ata/pata_netcell.c
24277 --- linux-2.6.32.42/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
24278 +++ linux-2.6.32.42/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
24279 @@ -34,7 +34,7 @@ static struct scsi_host_template netcell
24280 ATA_BMDMA_SHT(DRV_NAME),
24281 };
24282
24283 -static struct ata_port_operations netcell_ops = {
24284 +static const struct ata_port_operations netcell_ops = {
24285 .inherits = &ata_bmdma_port_ops,
24286 .cable_detect = ata_cable_80wire,
24287 .read_id = netcell_read_id,
24288 diff -urNp linux-2.6.32.42/drivers/ata/pata_ninja32.c linux-2.6.32.42/drivers/ata/pata_ninja32.c
24289 --- linux-2.6.32.42/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
24290 +++ linux-2.6.32.42/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
24291 @@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
24292 ATA_BMDMA_SHT(DRV_NAME),
24293 };
24294
24295 -static struct ata_port_operations ninja32_port_ops = {
24296 +static const struct ata_port_operations ninja32_port_ops = {
24297 .inherits = &ata_bmdma_port_ops,
24298 .sff_dev_select = ninja32_dev_select,
24299 .cable_detect = ata_cable_40wire,
24300 diff -urNp linux-2.6.32.42/drivers/ata/pata_ns87410.c linux-2.6.32.42/drivers/ata/pata_ns87410.c
24301 --- linux-2.6.32.42/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
24302 +++ linux-2.6.32.42/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
24303 @@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
24304 ATA_PIO_SHT(DRV_NAME),
24305 };
24306
24307 -static struct ata_port_operations ns87410_port_ops = {
24308 +static const struct ata_port_operations ns87410_port_ops = {
24309 .inherits = &ata_sff_port_ops,
24310 .qc_issue = ns87410_qc_issue,
24311 .cable_detect = ata_cable_40wire,
24312 diff -urNp linux-2.6.32.42/drivers/ata/pata_ns87415.c linux-2.6.32.42/drivers/ata/pata_ns87415.c
24313 --- linux-2.6.32.42/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
24314 +++ linux-2.6.32.42/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
24315 @@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
24316 }
24317 #endif /* 87560 SuperIO Support */
24318
24319 -static struct ata_port_operations ns87415_pata_ops = {
24320 +static const struct ata_port_operations ns87415_pata_ops = {
24321 .inherits = &ata_bmdma_port_ops,
24322
24323 .check_atapi_dma = ns87415_check_atapi_dma,
24324 @@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
24325 };
24326
24327 #if defined(CONFIG_SUPERIO)
24328 -static struct ata_port_operations ns87560_pata_ops = {
24329 +static const struct ata_port_operations ns87560_pata_ops = {
24330 .inherits = &ns87415_pata_ops,
24331 .sff_tf_read = ns87560_tf_read,
24332 .sff_check_status = ns87560_check_status,
24333 diff -urNp linux-2.6.32.42/drivers/ata/pata_octeon_cf.c linux-2.6.32.42/drivers/ata/pata_octeon_cf.c
24334 --- linux-2.6.32.42/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
24335 +++ linux-2.6.32.42/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
24336 @@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
24337 return 0;
24338 }
24339
24340 +/* cannot be const */
24341 static struct ata_port_operations octeon_cf_ops = {
24342 .inherits = &ata_sff_port_ops,
24343 .check_atapi_dma = octeon_cf_check_atapi_dma,
24344 diff -urNp linux-2.6.32.42/drivers/ata/pata_oldpiix.c linux-2.6.32.42/drivers/ata/pata_oldpiix.c
24345 --- linux-2.6.32.42/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
24346 +++ linux-2.6.32.42/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
24347 @@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
24348 ATA_BMDMA_SHT(DRV_NAME),
24349 };
24350
24351 -static struct ata_port_operations oldpiix_pata_ops = {
24352 +static const struct ata_port_operations oldpiix_pata_ops = {
24353 .inherits = &ata_bmdma_port_ops,
24354 .qc_issue = oldpiix_qc_issue,
24355 .cable_detect = ata_cable_40wire,
24356 diff -urNp linux-2.6.32.42/drivers/ata/pata_opti.c linux-2.6.32.42/drivers/ata/pata_opti.c
24357 --- linux-2.6.32.42/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
24358 +++ linux-2.6.32.42/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
24359 @@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
24360 ATA_PIO_SHT(DRV_NAME),
24361 };
24362
24363 -static struct ata_port_operations opti_port_ops = {
24364 +static const struct ata_port_operations opti_port_ops = {
24365 .inherits = &ata_sff_port_ops,
24366 .cable_detect = ata_cable_40wire,
24367 .set_piomode = opti_set_piomode,
24368 diff -urNp linux-2.6.32.42/drivers/ata/pata_optidma.c linux-2.6.32.42/drivers/ata/pata_optidma.c
24369 --- linux-2.6.32.42/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
24370 +++ linux-2.6.32.42/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
24371 @@ -337,7 +337,7 @@ static struct scsi_host_template optidma
24372 ATA_BMDMA_SHT(DRV_NAME),
24373 };
24374
24375 -static struct ata_port_operations optidma_port_ops = {
24376 +static const struct ata_port_operations optidma_port_ops = {
24377 .inherits = &ata_bmdma_port_ops,
24378 .cable_detect = ata_cable_40wire,
24379 .set_piomode = optidma_set_pio_mode,
24380 @@ -346,7 +346,7 @@ static struct ata_port_operations optidm
24381 .prereset = optidma_pre_reset,
24382 };
24383
24384 -static struct ata_port_operations optiplus_port_ops = {
24385 +static const struct ata_port_operations optiplus_port_ops = {
24386 .inherits = &optidma_port_ops,
24387 .set_piomode = optiplus_set_pio_mode,
24388 .set_dmamode = optiplus_set_dma_mode,
24389 diff -urNp linux-2.6.32.42/drivers/ata/pata_palmld.c linux-2.6.32.42/drivers/ata/pata_palmld.c
24390 --- linux-2.6.32.42/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
24391 +++ linux-2.6.32.42/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
24392 @@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
24393 ATA_PIO_SHT(DRV_NAME),
24394 };
24395
24396 -static struct ata_port_operations palmld_port_ops = {
24397 +static const struct ata_port_operations palmld_port_ops = {
24398 .inherits = &ata_sff_port_ops,
24399 .sff_data_xfer = ata_sff_data_xfer_noirq,
24400 .cable_detect = ata_cable_40wire,
24401 diff -urNp linux-2.6.32.42/drivers/ata/pata_pcmcia.c linux-2.6.32.42/drivers/ata/pata_pcmcia.c
24402 --- linux-2.6.32.42/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
24403 +++ linux-2.6.32.42/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
24404 @@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
24405 ATA_PIO_SHT(DRV_NAME),
24406 };
24407
24408 -static struct ata_port_operations pcmcia_port_ops = {
24409 +static const struct ata_port_operations pcmcia_port_ops = {
24410 .inherits = &ata_sff_port_ops,
24411 .sff_data_xfer = ata_sff_data_xfer_noirq,
24412 .cable_detect = ata_cable_40wire,
24413 .set_mode = pcmcia_set_mode,
24414 };
24415
24416 -static struct ata_port_operations pcmcia_8bit_port_ops = {
24417 +static const struct ata_port_operations pcmcia_8bit_port_ops = {
24418 .inherits = &ata_sff_port_ops,
24419 .sff_data_xfer = ata_data_xfer_8bit,
24420 .cable_detect = ata_cable_40wire,
24421 @@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
24422 unsigned long io_base, ctl_base;
24423 void __iomem *io_addr, *ctl_addr;
24424 int n_ports = 1;
24425 - struct ata_port_operations *ops = &pcmcia_port_ops;
24426 + const struct ata_port_operations *ops = &pcmcia_port_ops;
24427
24428 info = kzalloc(sizeof(*info), GFP_KERNEL);
24429 if (info == NULL)
24430 diff -urNp linux-2.6.32.42/drivers/ata/pata_pdc2027x.c linux-2.6.32.42/drivers/ata/pata_pdc2027x.c
24431 --- linux-2.6.32.42/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
24432 +++ linux-2.6.32.42/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
24433 @@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
24434 ATA_BMDMA_SHT(DRV_NAME),
24435 };
24436
24437 -static struct ata_port_operations pdc2027x_pata100_ops = {
24438 +static const struct ata_port_operations pdc2027x_pata100_ops = {
24439 .inherits = &ata_bmdma_port_ops,
24440 .check_atapi_dma = pdc2027x_check_atapi_dma,
24441 .cable_detect = pdc2027x_cable_detect,
24442 .prereset = pdc2027x_prereset,
24443 };
24444
24445 -static struct ata_port_operations pdc2027x_pata133_ops = {
24446 +static const struct ata_port_operations pdc2027x_pata133_ops = {
24447 .inherits = &pdc2027x_pata100_ops,
24448 .mode_filter = pdc2027x_mode_filter,
24449 .set_piomode = pdc2027x_set_piomode,
24450 diff -urNp linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c
24451 --- linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
24452 +++ linux-2.6.32.42/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
24453 @@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
24454 ATA_BMDMA_SHT(DRV_NAME),
24455 };
24456
24457 -static struct ata_port_operations pdc2024x_port_ops = {
24458 +static const struct ata_port_operations pdc2024x_port_ops = {
24459 .inherits = &ata_bmdma_port_ops,
24460
24461 .cable_detect = ata_cable_40wire,
24462 @@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
24463 .sff_exec_command = pdc202xx_exec_command,
24464 };
24465
24466 -static struct ata_port_operations pdc2026x_port_ops = {
24467 +static const struct ata_port_operations pdc2026x_port_ops = {
24468 .inherits = &pdc2024x_port_ops,
24469
24470 .check_atapi_dma = pdc2026x_check_atapi_dma,
24471 diff -urNp linux-2.6.32.42/drivers/ata/pata_platform.c linux-2.6.32.42/drivers/ata/pata_platform.c
24472 --- linux-2.6.32.42/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
24473 +++ linux-2.6.32.42/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
24474 @@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
24475 ATA_PIO_SHT(DRV_NAME),
24476 };
24477
24478 -static struct ata_port_operations pata_platform_port_ops = {
24479 +static const struct ata_port_operations pata_platform_port_ops = {
24480 .inherits = &ata_sff_port_ops,
24481 .sff_data_xfer = ata_sff_data_xfer_noirq,
24482 .cable_detect = ata_cable_unknown,
24483 diff -urNp linux-2.6.32.42/drivers/ata/pata_qdi.c linux-2.6.32.42/drivers/ata/pata_qdi.c
24484 --- linux-2.6.32.42/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
24485 +++ linux-2.6.32.42/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
24486 @@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
24487 ATA_PIO_SHT(DRV_NAME),
24488 };
24489
24490 -static struct ata_port_operations qdi6500_port_ops = {
24491 +static const struct ata_port_operations qdi6500_port_ops = {
24492 .inherits = &ata_sff_port_ops,
24493 .qc_issue = qdi_qc_issue,
24494 .sff_data_xfer = qdi_data_xfer,
24495 @@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
24496 .set_piomode = qdi6500_set_piomode,
24497 };
24498
24499 -static struct ata_port_operations qdi6580_port_ops = {
24500 +static const struct ata_port_operations qdi6580_port_ops = {
24501 .inherits = &qdi6500_port_ops,
24502 .set_piomode = qdi6580_set_piomode,
24503 };
24504 diff -urNp linux-2.6.32.42/drivers/ata/pata_radisys.c linux-2.6.32.42/drivers/ata/pata_radisys.c
24505 --- linux-2.6.32.42/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
24506 +++ linux-2.6.32.42/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
24507 @@ -187,7 +187,7 @@ static struct scsi_host_template radisys
24508 ATA_BMDMA_SHT(DRV_NAME),
24509 };
24510
24511 -static struct ata_port_operations radisys_pata_ops = {
24512 +static const struct ata_port_operations radisys_pata_ops = {
24513 .inherits = &ata_bmdma_port_ops,
24514 .qc_issue = radisys_qc_issue,
24515 .cable_detect = ata_cable_unknown,
24516 diff -urNp linux-2.6.32.42/drivers/ata/pata_rb532_cf.c linux-2.6.32.42/drivers/ata/pata_rb532_cf.c
24517 --- linux-2.6.32.42/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
24518 +++ linux-2.6.32.42/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
24519 @@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
24520 return IRQ_HANDLED;
24521 }
24522
24523 -static struct ata_port_operations rb532_pata_port_ops = {
24524 +static const struct ata_port_operations rb532_pata_port_ops = {
24525 .inherits = &ata_sff_port_ops,
24526 .sff_data_xfer = ata_sff_data_xfer32,
24527 };
24528 diff -urNp linux-2.6.32.42/drivers/ata/pata_rdc.c linux-2.6.32.42/drivers/ata/pata_rdc.c
24529 --- linux-2.6.32.42/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
24530 +++ linux-2.6.32.42/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
24531 @@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
24532 pci_write_config_byte(dev, 0x48, udma_enable);
24533 }
24534
24535 -static struct ata_port_operations rdc_pata_ops = {
24536 +static const struct ata_port_operations rdc_pata_ops = {
24537 .inherits = &ata_bmdma32_port_ops,
24538 .cable_detect = rdc_pata_cable_detect,
24539 .set_piomode = rdc_set_piomode,
24540 diff -urNp linux-2.6.32.42/drivers/ata/pata_rz1000.c linux-2.6.32.42/drivers/ata/pata_rz1000.c
24541 --- linux-2.6.32.42/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
24542 +++ linux-2.6.32.42/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
24543 @@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
24544 ATA_PIO_SHT(DRV_NAME),
24545 };
24546
24547 -static struct ata_port_operations rz1000_port_ops = {
24548 +static const struct ata_port_operations rz1000_port_ops = {
24549 .inherits = &ata_sff_port_ops,
24550 .cable_detect = ata_cable_40wire,
24551 .set_mode = rz1000_set_mode,
24552 diff -urNp linux-2.6.32.42/drivers/ata/pata_sc1200.c linux-2.6.32.42/drivers/ata/pata_sc1200.c
24553 --- linux-2.6.32.42/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
24554 +++ linux-2.6.32.42/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
24555 @@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
24556 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24557 };
24558
24559 -static struct ata_port_operations sc1200_port_ops = {
24560 +static const struct ata_port_operations sc1200_port_ops = {
24561 .inherits = &ata_bmdma_port_ops,
24562 .qc_prep = ata_sff_dumb_qc_prep,
24563 .qc_issue = sc1200_qc_issue,
24564 diff -urNp linux-2.6.32.42/drivers/ata/pata_scc.c linux-2.6.32.42/drivers/ata/pata_scc.c
24565 --- linux-2.6.32.42/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
24566 +++ linux-2.6.32.42/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
24567 @@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
24568 ATA_BMDMA_SHT(DRV_NAME),
24569 };
24570
24571 -static struct ata_port_operations scc_pata_ops = {
24572 +static const struct ata_port_operations scc_pata_ops = {
24573 .inherits = &ata_bmdma_port_ops,
24574
24575 .set_piomode = scc_set_piomode,
24576 diff -urNp linux-2.6.32.42/drivers/ata/pata_sch.c linux-2.6.32.42/drivers/ata/pata_sch.c
24577 --- linux-2.6.32.42/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
24578 +++ linux-2.6.32.42/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
24579 @@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
24580 ATA_BMDMA_SHT(DRV_NAME),
24581 };
24582
24583 -static struct ata_port_operations sch_pata_ops = {
24584 +static const struct ata_port_operations sch_pata_ops = {
24585 .inherits = &ata_bmdma_port_ops,
24586 .cable_detect = ata_cable_unknown,
24587 .set_piomode = sch_set_piomode,
24588 diff -urNp linux-2.6.32.42/drivers/ata/pata_serverworks.c linux-2.6.32.42/drivers/ata/pata_serverworks.c
24589 --- linux-2.6.32.42/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
24590 +++ linux-2.6.32.42/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
24591 @@ -299,7 +299,7 @@ static struct scsi_host_template serverw
24592 ATA_BMDMA_SHT(DRV_NAME),
24593 };
24594
24595 -static struct ata_port_operations serverworks_osb4_port_ops = {
24596 +static const struct ata_port_operations serverworks_osb4_port_ops = {
24597 .inherits = &ata_bmdma_port_ops,
24598 .cable_detect = serverworks_cable_detect,
24599 .mode_filter = serverworks_osb4_filter,
24600 @@ -307,7 +307,7 @@ static struct ata_port_operations server
24601 .set_dmamode = serverworks_set_dmamode,
24602 };
24603
24604 -static struct ata_port_operations serverworks_csb_port_ops = {
24605 +static const struct ata_port_operations serverworks_csb_port_ops = {
24606 .inherits = &serverworks_osb4_port_ops,
24607 .mode_filter = serverworks_csb_filter,
24608 };
24609 diff -urNp linux-2.6.32.42/drivers/ata/pata_sil680.c linux-2.6.32.42/drivers/ata/pata_sil680.c
24610 --- linux-2.6.32.42/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
24611 +++ linux-2.6.32.42/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
24612 @@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
24613 ATA_BMDMA_SHT(DRV_NAME),
24614 };
24615
24616 -static struct ata_port_operations sil680_port_ops = {
24617 +static const struct ata_port_operations sil680_port_ops = {
24618 .inherits = &ata_bmdma32_port_ops,
24619 .cable_detect = sil680_cable_detect,
24620 .set_piomode = sil680_set_piomode,
24621 diff -urNp linux-2.6.32.42/drivers/ata/pata_sis.c linux-2.6.32.42/drivers/ata/pata_sis.c
24622 --- linux-2.6.32.42/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
24623 +++ linux-2.6.32.42/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
24624 @@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
24625 ATA_BMDMA_SHT(DRV_NAME),
24626 };
24627
24628 -static struct ata_port_operations sis_133_for_sata_ops = {
24629 +static const struct ata_port_operations sis_133_for_sata_ops = {
24630 .inherits = &ata_bmdma_port_ops,
24631 .set_piomode = sis_133_set_piomode,
24632 .set_dmamode = sis_133_set_dmamode,
24633 .cable_detect = sis_133_cable_detect,
24634 };
24635
24636 -static struct ata_port_operations sis_base_ops = {
24637 +static const struct ata_port_operations sis_base_ops = {
24638 .inherits = &ata_bmdma_port_ops,
24639 .prereset = sis_pre_reset,
24640 };
24641
24642 -static struct ata_port_operations sis_133_ops = {
24643 +static const struct ata_port_operations sis_133_ops = {
24644 .inherits = &sis_base_ops,
24645 .set_piomode = sis_133_set_piomode,
24646 .set_dmamode = sis_133_set_dmamode,
24647 .cable_detect = sis_133_cable_detect,
24648 };
24649
24650 -static struct ata_port_operations sis_133_early_ops = {
24651 +static const struct ata_port_operations sis_133_early_ops = {
24652 .inherits = &sis_base_ops,
24653 .set_piomode = sis_100_set_piomode,
24654 .set_dmamode = sis_133_early_set_dmamode,
24655 .cable_detect = sis_66_cable_detect,
24656 };
24657
24658 -static struct ata_port_operations sis_100_ops = {
24659 +static const struct ata_port_operations sis_100_ops = {
24660 .inherits = &sis_base_ops,
24661 .set_piomode = sis_100_set_piomode,
24662 .set_dmamode = sis_100_set_dmamode,
24663 .cable_detect = sis_66_cable_detect,
24664 };
24665
24666 -static struct ata_port_operations sis_66_ops = {
24667 +static const struct ata_port_operations sis_66_ops = {
24668 .inherits = &sis_base_ops,
24669 .set_piomode = sis_old_set_piomode,
24670 .set_dmamode = sis_66_set_dmamode,
24671 .cable_detect = sis_66_cable_detect,
24672 };
24673
24674 -static struct ata_port_operations sis_old_ops = {
24675 +static const struct ata_port_operations sis_old_ops = {
24676 .inherits = &sis_base_ops,
24677 .set_piomode = sis_old_set_piomode,
24678 .set_dmamode = sis_old_set_dmamode,
24679 diff -urNp linux-2.6.32.42/drivers/ata/pata_sl82c105.c linux-2.6.32.42/drivers/ata/pata_sl82c105.c
24680 --- linux-2.6.32.42/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
24681 +++ linux-2.6.32.42/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
24682 @@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
24683 ATA_BMDMA_SHT(DRV_NAME),
24684 };
24685
24686 -static struct ata_port_operations sl82c105_port_ops = {
24687 +static const struct ata_port_operations sl82c105_port_ops = {
24688 .inherits = &ata_bmdma_port_ops,
24689 .qc_defer = sl82c105_qc_defer,
24690 .bmdma_start = sl82c105_bmdma_start,
24691 diff -urNp linux-2.6.32.42/drivers/ata/pata_triflex.c linux-2.6.32.42/drivers/ata/pata_triflex.c
24692 --- linux-2.6.32.42/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
24693 +++ linux-2.6.32.42/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
24694 @@ -178,7 +178,7 @@ static struct scsi_host_template triflex
24695 ATA_BMDMA_SHT(DRV_NAME),
24696 };
24697
24698 -static struct ata_port_operations triflex_port_ops = {
24699 +static const struct ata_port_operations triflex_port_ops = {
24700 .inherits = &ata_bmdma_port_ops,
24701 .bmdma_start = triflex_bmdma_start,
24702 .bmdma_stop = triflex_bmdma_stop,
24703 diff -urNp linux-2.6.32.42/drivers/ata/pata_via.c linux-2.6.32.42/drivers/ata/pata_via.c
24704 --- linux-2.6.32.42/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
24705 +++ linux-2.6.32.42/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
24706 @@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
24707 ATA_BMDMA_SHT(DRV_NAME),
24708 };
24709
24710 -static struct ata_port_operations via_port_ops = {
24711 +static const struct ata_port_operations via_port_ops = {
24712 .inherits = &ata_bmdma_port_ops,
24713 .cable_detect = via_cable_detect,
24714 .set_piomode = via_set_piomode,
24715 @@ -429,7 +429,7 @@ static struct ata_port_operations via_po
24716 .port_start = via_port_start,
24717 };
24718
24719 -static struct ata_port_operations via_port_ops_noirq = {
24720 +static const struct ata_port_operations via_port_ops_noirq = {
24721 .inherits = &via_port_ops,
24722 .sff_data_xfer = ata_sff_data_xfer_noirq,
24723 };
24724 diff -urNp linux-2.6.32.42/drivers/ata/pata_winbond.c linux-2.6.32.42/drivers/ata/pata_winbond.c
24725 --- linux-2.6.32.42/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
24726 +++ linux-2.6.32.42/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
24727 @@ -125,7 +125,7 @@ static struct scsi_host_template winbond
24728 ATA_PIO_SHT(DRV_NAME),
24729 };
24730
24731 -static struct ata_port_operations winbond_port_ops = {
24732 +static const struct ata_port_operations winbond_port_ops = {
24733 .inherits = &ata_sff_port_ops,
24734 .sff_data_xfer = winbond_data_xfer,
24735 .cable_detect = ata_cable_40wire,
24736 diff -urNp linux-2.6.32.42/drivers/ata/pdc_adma.c linux-2.6.32.42/drivers/ata/pdc_adma.c
24737 --- linux-2.6.32.42/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
24738 +++ linux-2.6.32.42/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
24739 @@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
24740 .dma_boundary = ADMA_DMA_BOUNDARY,
24741 };
24742
24743 -static struct ata_port_operations adma_ata_ops = {
24744 +static const struct ata_port_operations adma_ata_ops = {
24745 .inherits = &ata_sff_port_ops,
24746
24747 .lost_interrupt = ATA_OP_NULL,
24748 diff -urNp linux-2.6.32.42/drivers/ata/sata_fsl.c linux-2.6.32.42/drivers/ata/sata_fsl.c
24749 --- linux-2.6.32.42/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
24750 +++ linux-2.6.32.42/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
24751 @@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
24752 .dma_boundary = ATA_DMA_BOUNDARY,
24753 };
24754
24755 -static struct ata_port_operations sata_fsl_ops = {
24756 +static const struct ata_port_operations sata_fsl_ops = {
24757 .inherits = &sata_pmp_port_ops,
24758
24759 .qc_defer = ata_std_qc_defer,
24760 diff -urNp linux-2.6.32.42/drivers/ata/sata_inic162x.c linux-2.6.32.42/drivers/ata/sata_inic162x.c
24761 --- linux-2.6.32.42/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
24762 +++ linux-2.6.32.42/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
24763 @@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
24764 return 0;
24765 }
24766
24767 -static struct ata_port_operations inic_port_ops = {
24768 +static const struct ata_port_operations inic_port_ops = {
24769 .inherits = &sata_port_ops,
24770
24771 .check_atapi_dma = inic_check_atapi_dma,
24772 diff -urNp linux-2.6.32.42/drivers/ata/sata_mv.c linux-2.6.32.42/drivers/ata/sata_mv.c
24773 --- linux-2.6.32.42/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
24774 +++ linux-2.6.32.42/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
24775 @@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
24776 .dma_boundary = MV_DMA_BOUNDARY,
24777 };
24778
24779 -static struct ata_port_operations mv5_ops = {
24780 +static const struct ata_port_operations mv5_ops = {
24781 .inherits = &ata_sff_port_ops,
24782
24783 .lost_interrupt = ATA_OP_NULL,
24784 @@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
24785 .port_stop = mv_port_stop,
24786 };
24787
24788 -static struct ata_port_operations mv6_ops = {
24789 +static const struct ata_port_operations mv6_ops = {
24790 .inherits = &mv5_ops,
24791 .dev_config = mv6_dev_config,
24792 .scr_read = mv_scr_read,
24793 @@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
24794 .bmdma_status = mv_bmdma_status,
24795 };
24796
24797 -static struct ata_port_operations mv_iie_ops = {
24798 +static const struct ata_port_operations mv_iie_ops = {
24799 .inherits = &mv6_ops,
24800 .dev_config = ATA_OP_NULL,
24801 .qc_prep = mv_qc_prep_iie,
24802 diff -urNp linux-2.6.32.42/drivers/ata/sata_nv.c linux-2.6.32.42/drivers/ata/sata_nv.c
24803 --- linux-2.6.32.42/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
24804 +++ linux-2.6.32.42/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
24805 @@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
24806 * cases. Define nv_hardreset() which only kicks in for post-boot
24807 * probing and use it for all variants.
24808 */
24809 -static struct ata_port_operations nv_generic_ops = {
24810 +static const struct ata_port_operations nv_generic_ops = {
24811 .inherits = &ata_bmdma_port_ops,
24812 .lost_interrupt = ATA_OP_NULL,
24813 .scr_read = nv_scr_read,
24814 @@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
24815 .hardreset = nv_hardreset,
24816 };
24817
24818 -static struct ata_port_operations nv_nf2_ops = {
24819 +static const struct ata_port_operations nv_nf2_ops = {
24820 .inherits = &nv_generic_ops,
24821 .freeze = nv_nf2_freeze,
24822 .thaw = nv_nf2_thaw,
24823 };
24824
24825 -static struct ata_port_operations nv_ck804_ops = {
24826 +static const struct ata_port_operations nv_ck804_ops = {
24827 .inherits = &nv_generic_ops,
24828 .freeze = nv_ck804_freeze,
24829 .thaw = nv_ck804_thaw,
24830 .host_stop = nv_ck804_host_stop,
24831 };
24832
24833 -static struct ata_port_operations nv_adma_ops = {
24834 +static const struct ata_port_operations nv_adma_ops = {
24835 .inherits = &nv_ck804_ops,
24836
24837 .check_atapi_dma = nv_adma_check_atapi_dma,
24838 @@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
24839 .host_stop = nv_adma_host_stop,
24840 };
24841
24842 -static struct ata_port_operations nv_swncq_ops = {
24843 +static const struct ata_port_operations nv_swncq_ops = {
24844 .inherits = &nv_generic_ops,
24845
24846 .qc_defer = ata_std_qc_defer,
24847 diff -urNp linux-2.6.32.42/drivers/ata/sata_promise.c linux-2.6.32.42/drivers/ata/sata_promise.c
24848 --- linux-2.6.32.42/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
24849 +++ linux-2.6.32.42/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
24850 @@ -195,7 +195,7 @@ static const struct ata_port_operations
24851 .error_handler = pdc_error_handler,
24852 };
24853
24854 -static struct ata_port_operations pdc_sata_ops = {
24855 +static const struct ata_port_operations pdc_sata_ops = {
24856 .inherits = &pdc_common_ops,
24857 .cable_detect = pdc_sata_cable_detect,
24858 .freeze = pdc_sata_freeze,
24859 @@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
24860
24861 /* First-generation chips need a more restrictive ->check_atapi_dma op,
24862 and ->freeze/thaw that ignore the hotplug controls. */
24863 -static struct ata_port_operations pdc_old_sata_ops = {
24864 +static const struct ata_port_operations pdc_old_sata_ops = {
24865 .inherits = &pdc_sata_ops,
24866 .freeze = pdc_freeze,
24867 .thaw = pdc_thaw,
24868 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
24869 };
24870
24871 -static struct ata_port_operations pdc_pata_ops = {
24872 +static const struct ata_port_operations pdc_pata_ops = {
24873 .inherits = &pdc_common_ops,
24874 .cable_detect = pdc_pata_cable_detect,
24875 .freeze = pdc_freeze,
24876 diff -urNp linux-2.6.32.42/drivers/ata/sata_qstor.c linux-2.6.32.42/drivers/ata/sata_qstor.c
24877 --- linux-2.6.32.42/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
24878 +++ linux-2.6.32.42/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
24879 @@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
24880 .dma_boundary = QS_DMA_BOUNDARY,
24881 };
24882
24883 -static struct ata_port_operations qs_ata_ops = {
24884 +static const struct ata_port_operations qs_ata_ops = {
24885 .inherits = &ata_sff_port_ops,
24886
24887 .check_atapi_dma = qs_check_atapi_dma,
24888 diff -urNp linux-2.6.32.42/drivers/ata/sata_sil24.c linux-2.6.32.42/drivers/ata/sata_sil24.c
24889 --- linux-2.6.32.42/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
24890 +++ linux-2.6.32.42/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
24891 @@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
24892 .dma_boundary = ATA_DMA_BOUNDARY,
24893 };
24894
24895 -static struct ata_port_operations sil24_ops = {
24896 +static const struct ata_port_operations sil24_ops = {
24897 .inherits = &sata_pmp_port_ops,
24898
24899 .qc_defer = sil24_qc_defer,
24900 diff -urNp linux-2.6.32.42/drivers/ata/sata_sil.c linux-2.6.32.42/drivers/ata/sata_sil.c
24901 --- linux-2.6.32.42/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
24902 +++ linux-2.6.32.42/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
24903 @@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
24904 .sg_tablesize = ATA_MAX_PRD
24905 };
24906
24907 -static struct ata_port_operations sil_ops = {
24908 +static const struct ata_port_operations sil_ops = {
24909 .inherits = &ata_bmdma32_port_ops,
24910 .dev_config = sil_dev_config,
24911 .set_mode = sil_set_mode,
24912 diff -urNp linux-2.6.32.42/drivers/ata/sata_sis.c linux-2.6.32.42/drivers/ata/sata_sis.c
24913 --- linux-2.6.32.42/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
24914 +++ linux-2.6.32.42/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
24915 @@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
24916 ATA_BMDMA_SHT(DRV_NAME),
24917 };
24918
24919 -static struct ata_port_operations sis_ops = {
24920 +static const struct ata_port_operations sis_ops = {
24921 .inherits = &ata_bmdma_port_ops,
24922 .scr_read = sis_scr_read,
24923 .scr_write = sis_scr_write,
24924 diff -urNp linux-2.6.32.42/drivers/ata/sata_svw.c linux-2.6.32.42/drivers/ata/sata_svw.c
24925 --- linux-2.6.32.42/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
24926 +++ linux-2.6.32.42/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
24927 @@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
24928 };
24929
24930
24931 -static struct ata_port_operations k2_sata_ops = {
24932 +static const struct ata_port_operations k2_sata_ops = {
24933 .inherits = &ata_bmdma_port_ops,
24934 .sff_tf_load = k2_sata_tf_load,
24935 .sff_tf_read = k2_sata_tf_read,
24936 diff -urNp linux-2.6.32.42/drivers/ata/sata_sx4.c linux-2.6.32.42/drivers/ata/sata_sx4.c
24937 --- linux-2.6.32.42/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
24938 +++ linux-2.6.32.42/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
24939 @@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
24940 };
24941
24942 /* TODO: inherit from base port_ops after converting to new EH */
24943 -static struct ata_port_operations pdc_20621_ops = {
24944 +static const struct ata_port_operations pdc_20621_ops = {
24945 .inherits = &ata_sff_port_ops,
24946
24947 .check_atapi_dma = pdc_check_atapi_dma,
24948 diff -urNp linux-2.6.32.42/drivers/ata/sata_uli.c linux-2.6.32.42/drivers/ata/sata_uli.c
24949 --- linux-2.6.32.42/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
24950 +++ linux-2.6.32.42/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
24951 @@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
24952 ATA_BMDMA_SHT(DRV_NAME),
24953 };
24954
24955 -static struct ata_port_operations uli_ops = {
24956 +static const struct ata_port_operations uli_ops = {
24957 .inherits = &ata_bmdma_port_ops,
24958 .scr_read = uli_scr_read,
24959 .scr_write = uli_scr_write,
24960 diff -urNp linux-2.6.32.42/drivers/ata/sata_via.c linux-2.6.32.42/drivers/ata/sata_via.c
24961 --- linux-2.6.32.42/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
24962 +++ linux-2.6.32.42/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
24963 @@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
24964 ATA_BMDMA_SHT(DRV_NAME),
24965 };
24966
24967 -static struct ata_port_operations svia_base_ops = {
24968 +static const struct ata_port_operations svia_base_ops = {
24969 .inherits = &ata_bmdma_port_ops,
24970 .sff_tf_load = svia_tf_load,
24971 };
24972
24973 -static struct ata_port_operations vt6420_sata_ops = {
24974 +static const struct ata_port_operations vt6420_sata_ops = {
24975 .inherits = &svia_base_ops,
24976 .freeze = svia_noop_freeze,
24977 .prereset = vt6420_prereset,
24978 .bmdma_start = vt6420_bmdma_start,
24979 };
24980
24981 -static struct ata_port_operations vt6421_pata_ops = {
24982 +static const struct ata_port_operations vt6421_pata_ops = {
24983 .inherits = &svia_base_ops,
24984 .cable_detect = vt6421_pata_cable_detect,
24985 .set_piomode = vt6421_set_pio_mode,
24986 .set_dmamode = vt6421_set_dma_mode,
24987 };
24988
24989 -static struct ata_port_operations vt6421_sata_ops = {
24990 +static const struct ata_port_operations vt6421_sata_ops = {
24991 .inherits = &svia_base_ops,
24992 .scr_read = svia_scr_read,
24993 .scr_write = svia_scr_write,
24994 };
24995
24996 -static struct ata_port_operations vt8251_ops = {
24997 +static const struct ata_port_operations vt8251_ops = {
24998 .inherits = &svia_base_ops,
24999 .hardreset = sata_std_hardreset,
25000 .scr_read = vt8251_scr_read,
25001 diff -urNp linux-2.6.32.42/drivers/ata/sata_vsc.c linux-2.6.32.42/drivers/ata/sata_vsc.c
25002 --- linux-2.6.32.42/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
25003 +++ linux-2.6.32.42/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
25004 @@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
25005 };
25006
25007
25008 -static struct ata_port_operations vsc_sata_ops = {
25009 +static const struct ata_port_operations vsc_sata_ops = {
25010 .inherits = &ata_bmdma_port_ops,
25011 /* The IRQ handling is not quite standard SFF behaviour so we
25012 cannot use the default lost interrupt handler */
25013 diff -urNp linux-2.6.32.42/drivers/atm/adummy.c linux-2.6.32.42/drivers/atm/adummy.c
25014 --- linux-2.6.32.42/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
25015 +++ linux-2.6.32.42/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
25016 @@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
25017 vcc->pop(vcc, skb);
25018 else
25019 dev_kfree_skb_any(skb);
25020 - atomic_inc(&vcc->stats->tx);
25021 + atomic_inc_unchecked(&vcc->stats->tx);
25022
25023 return 0;
25024 }
25025 diff -urNp linux-2.6.32.42/drivers/atm/ambassador.c linux-2.6.32.42/drivers/atm/ambassador.c
25026 --- linux-2.6.32.42/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
25027 +++ linux-2.6.32.42/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
25028 @@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
25029 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25030
25031 // VC layer stats
25032 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25033 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25034
25035 // free the descriptor
25036 kfree (tx_descr);
25037 @@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
25038 dump_skb ("<<<", vc, skb);
25039
25040 // VC layer stats
25041 - atomic_inc(&atm_vcc->stats->rx);
25042 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25043 __net_timestamp(skb);
25044 // end of our responsability
25045 atm_vcc->push (atm_vcc, skb);
25046 @@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
25047 } else {
25048 PRINTK (KERN_INFO, "dropped over-size frame");
25049 // should we count this?
25050 - atomic_inc(&atm_vcc->stats->rx_drop);
25051 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25052 }
25053
25054 } else {
25055 @@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
25056 }
25057
25058 if (check_area (skb->data, skb->len)) {
25059 - atomic_inc(&atm_vcc->stats->tx_err);
25060 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25061 return -ENOMEM; // ?
25062 }
25063
25064 diff -urNp linux-2.6.32.42/drivers/atm/atmtcp.c linux-2.6.32.42/drivers/atm/atmtcp.c
25065 --- linux-2.6.32.42/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
25066 +++ linux-2.6.32.42/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
25067 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
25068 if (vcc->pop) vcc->pop(vcc,skb);
25069 else dev_kfree_skb(skb);
25070 if (dev_data) return 0;
25071 - atomic_inc(&vcc->stats->tx_err);
25072 + atomic_inc_unchecked(&vcc->stats->tx_err);
25073 return -ENOLINK;
25074 }
25075 size = skb->len+sizeof(struct atmtcp_hdr);
25076 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
25077 if (!new_skb) {
25078 if (vcc->pop) vcc->pop(vcc,skb);
25079 else dev_kfree_skb(skb);
25080 - atomic_inc(&vcc->stats->tx_err);
25081 + atomic_inc_unchecked(&vcc->stats->tx_err);
25082 return -ENOBUFS;
25083 }
25084 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25085 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
25086 if (vcc->pop) vcc->pop(vcc,skb);
25087 else dev_kfree_skb(skb);
25088 out_vcc->push(out_vcc,new_skb);
25089 - atomic_inc(&vcc->stats->tx);
25090 - atomic_inc(&out_vcc->stats->rx);
25091 + atomic_inc_unchecked(&vcc->stats->tx);
25092 + atomic_inc_unchecked(&out_vcc->stats->rx);
25093 return 0;
25094 }
25095
25096 @@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
25097 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25098 read_unlock(&vcc_sklist_lock);
25099 if (!out_vcc) {
25100 - atomic_inc(&vcc->stats->tx_err);
25101 + atomic_inc_unchecked(&vcc->stats->tx_err);
25102 goto done;
25103 }
25104 skb_pull(skb,sizeof(struct atmtcp_hdr));
25105 @@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
25106 __net_timestamp(new_skb);
25107 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25108 out_vcc->push(out_vcc,new_skb);
25109 - atomic_inc(&vcc->stats->tx);
25110 - atomic_inc(&out_vcc->stats->rx);
25111 + atomic_inc_unchecked(&vcc->stats->tx);
25112 + atomic_inc_unchecked(&out_vcc->stats->rx);
25113 done:
25114 if (vcc->pop) vcc->pop(vcc,skb);
25115 else dev_kfree_skb(skb);
25116 diff -urNp linux-2.6.32.42/drivers/atm/eni.c linux-2.6.32.42/drivers/atm/eni.c
25117 --- linux-2.6.32.42/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
25118 +++ linux-2.6.32.42/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
25119 @@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25120 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25121 vcc->dev->number);
25122 length = 0;
25123 - atomic_inc(&vcc->stats->rx_err);
25124 + atomic_inc_unchecked(&vcc->stats->rx_err);
25125 }
25126 else {
25127 length = ATM_CELL_SIZE-1; /* no HEC */
25128 @@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25129 size);
25130 }
25131 eff = length = 0;
25132 - atomic_inc(&vcc->stats->rx_err);
25133 + atomic_inc_unchecked(&vcc->stats->rx_err);
25134 }
25135 else {
25136 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25137 @@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25138 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25139 vcc->dev->number,vcc->vci,length,size << 2,descr);
25140 length = eff = 0;
25141 - atomic_inc(&vcc->stats->rx_err);
25142 + atomic_inc_unchecked(&vcc->stats->rx_err);
25143 }
25144 }
25145 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25146 @@ -770,7 +770,7 @@ rx_dequeued++;
25147 vcc->push(vcc,skb);
25148 pushed++;
25149 }
25150 - atomic_inc(&vcc->stats->rx);
25151 + atomic_inc_unchecked(&vcc->stats->rx);
25152 }
25153 wake_up(&eni_dev->rx_wait);
25154 }
25155 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
25156 PCI_DMA_TODEVICE);
25157 if (vcc->pop) vcc->pop(vcc,skb);
25158 else dev_kfree_skb_irq(skb);
25159 - atomic_inc(&vcc->stats->tx);
25160 + atomic_inc_unchecked(&vcc->stats->tx);
25161 wake_up(&eni_dev->tx_wait);
25162 dma_complete++;
25163 }
25164 diff -urNp linux-2.6.32.42/drivers/atm/firestream.c linux-2.6.32.42/drivers/atm/firestream.c
25165 --- linux-2.6.32.42/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
25166 +++ linux-2.6.32.42/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
25167 @@ -748,7 +748,7 @@ static void process_txdone_queue (struct
25168 }
25169 }
25170
25171 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25172 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25173
25174 fs_dprintk (FS_DEBUG_TXMEM, "i");
25175 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25176 @@ -815,7 +815,7 @@ static void process_incoming (struct fs_
25177 #endif
25178 skb_put (skb, qe->p1 & 0xffff);
25179 ATM_SKB(skb)->vcc = atm_vcc;
25180 - atomic_inc(&atm_vcc->stats->rx);
25181 + atomic_inc_unchecked(&atm_vcc->stats->rx);
25182 __net_timestamp(skb);
25183 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25184 atm_vcc->push (atm_vcc, skb);
25185 @@ -836,12 +836,12 @@ static void process_incoming (struct fs_
25186 kfree (pe);
25187 }
25188 if (atm_vcc)
25189 - atomic_inc(&atm_vcc->stats->rx_drop);
25190 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25191 break;
25192 case 0x1f: /* Reassembly abort: no buffers. */
25193 /* Silently increment error counter. */
25194 if (atm_vcc)
25195 - atomic_inc(&atm_vcc->stats->rx_drop);
25196 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25197 break;
25198 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25199 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25200 diff -urNp linux-2.6.32.42/drivers/atm/fore200e.c linux-2.6.32.42/drivers/atm/fore200e.c
25201 --- linux-2.6.32.42/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
25202 +++ linux-2.6.32.42/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
25203 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
25204 #endif
25205 /* check error condition */
25206 if (*entry->status & STATUS_ERROR)
25207 - atomic_inc(&vcc->stats->tx_err);
25208 + atomic_inc_unchecked(&vcc->stats->tx_err);
25209 else
25210 - atomic_inc(&vcc->stats->tx);
25211 + atomic_inc_unchecked(&vcc->stats->tx);
25212 }
25213 }
25214
25215 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
25216 if (skb == NULL) {
25217 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25218
25219 - atomic_inc(&vcc->stats->rx_drop);
25220 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25221 return -ENOMEM;
25222 }
25223
25224 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
25225
25226 dev_kfree_skb_any(skb);
25227
25228 - atomic_inc(&vcc->stats->rx_drop);
25229 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25230 return -ENOMEM;
25231 }
25232
25233 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25234
25235 vcc->push(vcc, skb);
25236 - atomic_inc(&vcc->stats->rx);
25237 + atomic_inc_unchecked(&vcc->stats->rx);
25238
25239 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25240
25241 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
25242 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25243 fore200e->atm_dev->number,
25244 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25245 - atomic_inc(&vcc->stats->rx_err);
25246 + atomic_inc_unchecked(&vcc->stats->rx_err);
25247 }
25248 }
25249
25250 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
25251 goto retry_here;
25252 }
25253
25254 - atomic_inc(&vcc->stats->tx_err);
25255 + atomic_inc_unchecked(&vcc->stats->tx_err);
25256
25257 fore200e->tx_sat++;
25258 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25259 diff -urNp linux-2.6.32.42/drivers/atm/he.c linux-2.6.32.42/drivers/atm/he.c
25260 --- linux-2.6.32.42/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
25261 +++ linux-2.6.32.42/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
25262 @@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25263
25264 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25265 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25266 - atomic_inc(&vcc->stats->rx_drop);
25267 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25268 goto return_host_buffers;
25269 }
25270
25271 @@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25272 RBRQ_LEN_ERR(he_dev->rbrq_head)
25273 ? "LEN_ERR" : "",
25274 vcc->vpi, vcc->vci);
25275 - atomic_inc(&vcc->stats->rx_err);
25276 + atomic_inc_unchecked(&vcc->stats->rx_err);
25277 goto return_host_buffers;
25278 }
25279
25280 @@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25281 vcc->push(vcc, skb);
25282 spin_lock(&he_dev->global_lock);
25283
25284 - atomic_inc(&vcc->stats->rx);
25285 + atomic_inc_unchecked(&vcc->stats->rx);
25286
25287 return_host_buffers:
25288 ++pdus_assembled;
25289 @@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
25290 tpd->vcc->pop(tpd->vcc, tpd->skb);
25291 else
25292 dev_kfree_skb_any(tpd->skb);
25293 - atomic_inc(&tpd->vcc->stats->tx_err);
25294 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25295 }
25296 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25297 return;
25298 @@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25299 vcc->pop(vcc, skb);
25300 else
25301 dev_kfree_skb_any(skb);
25302 - atomic_inc(&vcc->stats->tx_err);
25303 + atomic_inc_unchecked(&vcc->stats->tx_err);
25304 return -EINVAL;
25305 }
25306
25307 @@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25308 vcc->pop(vcc, skb);
25309 else
25310 dev_kfree_skb_any(skb);
25311 - atomic_inc(&vcc->stats->tx_err);
25312 + atomic_inc_unchecked(&vcc->stats->tx_err);
25313 return -EINVAL;
25314 }
25315 #endif
25316 @@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25317 vcc->pop(vcc, skb);
25318 else
25319 dev_kfree_skb_any(skb);
25320 - atomic_inc(&vcc->stats->tx_err);
25321 + atomic_inc_unchecked(&vcc->stats->tx_err);
25322 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25323 return -ENOMEM;
25324 }
25325 @@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25326 vcc->pop(vcc, skb);
25327 else
25328 dev_kfree_skb_any(skb);
25329 - atomic_inc(&vcc->stats->tx_err);
25330 + atomic_inc_unchecked(&vcc->stats->tx_err);
25331 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25332 return -ENOMEM;
25333 }
25334 @@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25335 __enqueue_tpd(he_dev, tpd, cid);
25336 spin_unlock_irqrestore(&he_dev->global_lock, flags);
25337
25338 - atomic_inc(&vcc->stats->tx);
25339 + atomic_inc_unchecked(&vcc->stats->tx);
25340
25341 return 0;
25342 }
25343 diff -urNp linux-2.6.32.42/drivers/atm/horizon.c linux-2.6.32.42/drivers/atm/horizon.c
25344 --- linux-2.6.32.42/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
25345 +++ linux-2.6.32.42/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
25346 @@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
25347 {
25348 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
25349 // VC layer stats
25350 - atomic_inc(&vcc->stats->rx);
25351 + atomic_inc_unchecked(&vcc->stats->rx);
25352 __net_timestamp(skb);
25353 // end of our responsability
25354 vcc->push (vcc, skb);
25355 @@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
25356 dev->tx_iovec = NULL;
25357
25358 // VC layer stats
25359 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25360 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25361
25362 // free the skb
25363 hrz_kfree_skb (skb);
25364 diff -urNp linux-2.6.32.42/drivers/atm/idt77252.c linux-2.6.32.42/drivers/atm/idt77252.c
25365 --- linux-2.6.32.42/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
25366 +++ linux-2.6.32.42/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
25367 @@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
25368 else
25369 dev_kfree_skb(skb);
25370
25371 - atomic_inc(&vcc->stats->tx);
25372 + atomic_inc_unchecked(&vcc->stats->tx);
25373 }
25374
25375 atomic_dec(&scq->used);
25376 @@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
25377 if ((sb = dev_alloc_skb(64)) == NULL) {
25378 printk("%s: Can't allocate buffers for aal0.\n",
25379 card->name);
25380 - atomic_add(i, &vcc->stats->rx_drop);
25381 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
25382 break;
25383 }
25384 if (!atm_charge(vcc, sb->truesize)) {
25385 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
25386 card->name);
25387 - atomic_add(i - 1, &vcc->stats->rx_drop);
25388 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
25389 dev_kfree_skb(sb);
25390 break;
25391 }
25392 @@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
25393 ATM_SKB(sb)->vcc = vcc;
25394 __net_timestamp(sb);
25395 vcc->push(vcc, sb);
25396 - atomic_inc(&vcc->stats->rx);
25397 + atomic_inc_unchecked(&vcc->stats->rx);
25398
25399 cell += ATM_CELL_PAYLOAD;
25400 }
25401 @@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
25402 "(CDC: %08x)\n",
25403 card->name, len, rpp->len, readl(SAR_REG_CDC));
25404 recycle_rx_pool_skb(card, rpp);
25405 - atomic_inc(&vcc->stats->rx_err);
25406 + atomic_inc_unchecked(&vcc->stats->rx_err);
25407 return;
25408 }
25409 if (stat & SAR_RSQE_CRC) {
25410 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
25411 recycle_rx_pool_skb(card, rpp);
25412 - atomic_inc(&vcc->stats->rx_err);
25413 + atomic_inc_unchecked(&vcc->stats->rx_err);
25414 return;
25415 }
25416 if (skb_queue_len(&rpp->queue) > 1) {
25417 @@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
25418 RXPRINTK("%s: Can't alloc RX skb.\n",
25419 card->name);
25420 recycle_rx_pool_skb(card, rpp);
25421 - atomic_inc(&vcc->stats->rx_err);
25422 + atomic_inc_unchecked(&vcc->stats->rx_err);
25423 return;
25424 }
25425 if (!atm_charge(vcc, skb->truesize)) {
25426 @@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
25427 __net_timestamp(skb);
25428
25429 vcc->push(vcc, skb);
25430 - atomic_inc(&vcc->stats->rx);
25431 + atomic_inc_unchecked(&vcc->stats->rx);
25432
25433 return;
25434 }
25435 @@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
25436 __net_timestamp(skb);
25437
25438 vcc->push(vcc, skb);
25439 - atomic_inc(&vcc->stats->rx);
25440 + atomic_inc_unchecked(&vcc->stats->rx);
25441
25442 if (skb->truesize > SAR_FB_SIZE_3)
25443 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
25444 @@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
25445 if (vcc->qos.aal != ATM_AAL0) {
25446 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
25447 card->name, vpi, vci);
25448 - atomic_inc(&vcc->stats->rx_drop);
25449 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25450 goto drop;
25451 }
25452
25453 if ((sb = dev_alloc_skb(64)) == NULL) {
25454 printk("%s: Can't allocate buffers for AAL0.\n",
25455 card->name);
25456 - atomic_inc(&vcc->stats->rx_err);
25457 + atomic_inc_unchecked(&vcc->stats->rx_err);
25458 goto drop;
25459 }
25460
25461 @@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
25462 ATM_SKB(sb)->vcc = vcc;
25463 __net_timestamp(sb);
25464 vcc->push(vcc, sb);
25465 - atomic_inc(&vcc->stats->rx);
25466 + atomic_inc_unchecked(&vcc->stats->rx);
25467
25468 drop:
25469 skb_pull(queue, 64);
25470 @@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25471
25472 if (vc == NULL) {
25473 printk("%s: NULL connection in send().\n", card->name);
25474 - atomic_inc(&vcc->stats->tx_err);
25475 + atomic_inc_unchecked(&vcc->stats->tx_err);
25476 dev_kfree_skb(skb);
25477 return -EINVAL;
25478 }
25479 if (!test_bit(VCF_TX, &vc->flags)) {
25480 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
25481 - atomic_inc(&vcc->stats->tx_err);
25482 + atomic_inc_unchecked(&vcc->stats->tx_err);
25483 dev_kfree_skb(skb);
25484 return -EINVAL;
25485 }
25486 @@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25487 break;
25488 default:
25489 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
25490 - atomic_inc(&vcc->stats->tx_err);
25491 + atomic_inc_unchecked(&vcc->stats->tx_err);
25492 dev_kfree_skb(skb);
25493 return -EINVAL;
25494 }
25495
25496 if (skb_shinfo(skb)->nr_frags != 0) {
25497 printk("%s: No scatter-gather yet.\n", card->name);
25498 - atomic_inc(&vcc->stats->tx_err);
25499 + atomic_inc_unchecked(&vcc->stats->tx_err);
25500 dev_kfree_skb(skb);
25501 return -EINVAL;
25502 }
25503 @@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
25504
25505 err = queue_skb(card, vc, skb, oam);
25506 if (err) {
25507 - atomic_inc(&vcc->stats->tx_err);
25508 + atomic_inc_unchecked(&vcc->stats->tx_err);
25509 dev_kfree_skb(skb);
25510 return err;
25511 }
25512 @@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
25513 skb = dev_alloc_skb(64);
25514 if (!skb) {
25515 printk("%s: Out of memory in send_oam().\n", card->name);
25516 - atomic_inc(&vcc->stats->tx_err);
25517 + atomic_inc_unchecked(&vcc->stats->tx_err);
25518 return -ENOMEM;
25519 }
25520 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
25521 diff -urNp linux-2.6.32.42/drivers/atm/iphase.c linux-2.6.32.42/drivers/atm/iphase.c
25522 --- linux-2.6.32.42/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
25523 +++ linux-2.6.32.42/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
25524 @@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
25525 status = (u_short) (buf_desc_ptr->desc_mode);
25526 if (status & (RX_CER | RX_PTE | RX_OFL))
25527 {
25528 - atomic_inc(&vcc->stats->rx_err);
25529 + atomic_inc_unchecked(&vcc->stats->rx_err);
25530 IF_ERR(printk("IA: bad packet, dropping it");)
25531 if (status & RX_CER) {
25532 IF_ERR(printk(" cause: packet CRC error\n");)
25533 @@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
25534 len = dma_addr - buf_addr;
25535 if (len > iadev->rx_buf_sz) {
25536 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
25537 - atomic_inc(&vcc->stats->rx_err);
25538 + atomic_inc_unchecked(&vcc->stats->rx_err);
25539 goto out_free_desc;
25540 }
25541
25542 @@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
25543 ia_vcc = INPH_IA_VCC(vcc);
25544 if (ia_vcc == NULL)
25545 {
25546 - atomic_inc(&vcc->stats->rx_err);
25547 + atomic_inc_unchecked(&vcc->stats->rx_err);
25548 dev_kfree_skb_any(skb);
25549 atm_return(vcc, atm_guess_pdu2truesize(len));
25550 goto INCR_DLE;
25551 @@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
25552 if ((length > iadev->rx_buf_sz) || (length >
25553 (skb->len - sizeof(struct cpcs_trailer))))
25554 {
25555 - atomic_inc(&vcc->stats->rx_err);
25556 + atomic_inc_unchecked(&vcc->stats->rx_err);
25557 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
25558 length, skb->len);)
25559 dev_kfree_skb_any(skb);
25560 @@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
25561
25562 IF_RX(printk("rx_dle_intr: skb push");)
25563 vcc->push(vcc,skb);
25564 - atomic_inc(&vcc->stats->rx);
25565 + atomic_inc_unchecked(&vcc->stats->rx);
25566 iadev->rx_pkt_cnt++;
25567 }
25568 INCR_DLE:
25569 @@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
25570 {
25571 struct k_sonet_stats *stats;
25572 stats = &PRIV(_ia_dev[board])->sonet_stats;
25573 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
25574 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
25575 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
25576 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
25577 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
25578 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
25579 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
25580 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
25581 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
25582 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
25583 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
25584 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
25585 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
25586 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
25587 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
25588 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
25589 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
25590 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
25591 }
25592 ia_cmds.status = 0;
25593 break;
25594 @@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
25595 if ((desc == 0) || (desc > iadev->num_tx_desc))
25596 {
25597 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
25598 - atomic_inc(&vcc->stats->tx);
25599 + atomic_inc_unchecked(&vcc->stats->tx);
25600 if (vcc->pop)
25601 vcc->pop(vcc, skb);
25602 else
25603 @@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
25604 ATM_DESC(skb) = vcc->vci;
25605 skb_queue_tail(&iadev->tx_dma_q, skb);
25606
25607 - atomic_inc(&vcc->stats->tx);
25608 + atomic_inc_unchecked(&vcc->stats->tx);
25609 iadev->tx_pkt_cnt++;
25610 /* Increment transaction counter */
25611 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
25612
25613 #if 0
25614 /* add flow control logic */
25615 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
25616 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
25617 if (iavcc->vc_desc_cnt > 10) {
25618 vcc->tx_quota = vcc->tx_quota * 3 / 4;
25619 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
25620 diff -urNp linux-2.6.32.42/drivers/atm/lanai.c linux-2.6.32.42/drivers/atm/lanai.c
25621 --- linux-2.6.32.42/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
25622 +++ linux-2.6.32.42/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
25623 @@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
25624 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
25625 lanai_endtx(lanai, lvcc);
25626 lanai_free_skb(lvcc->tx.atmvcc, skb);
25627 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
25628 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
25629 }
25630
25631 /* Try to fill the buffer - don't call unless there is backlog */
25632 @@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
25633 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
25634 __net_timestamp(skb);
25635 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
25636 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
25637 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
25638 out:
25639 lvcc->rx.buf.ptr = end;
25640 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
25641 @@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
25642 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
25643 "vcc %d\n", lanai->number, (unsigned int) s, vci);
25644 lanai->stats.service_rxnotaal5++;
25645 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25646 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25647 return 0;
25648 }
25649 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
25650 @@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
25651 int bytes;
25652 read_unlock(&vcc_sklist_lock);
25653 DPRINTK("got trashed rx pdu on vci %d\n", vci);
25654 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25655 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25656 lvcc->stats.x.aal5.service_trash++;
25657 bytes = (SERVICE_GET_END(s) * 16) -
25658 (((unsigned long) lvcc->rx.buf.ptr) -
25659 @@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
25660 }
25661 if (s & SERVICE_STREAM) {
25662 read_unlock(&vcc_sklist_lock);
25663 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25664 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25665 lvcc->stats.x.aal5.service_stream++;
25666 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
25667 "PDU on VCI %d!\n", lanai->number, vci);
25668 @@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
25669 return 0;
25670 }
25671 DPRINTK("got rx crc error on vci %d\n", vci);
25672 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
25673 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
25674 lvcc->stats.x.aal5.service_rxcrc++;
25675 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
25676 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
25677 diff -urNp linux-2.6.32.42/drivers/atm/nicstar.c linux-2.6.32.42/drivers/atm/nicstar.c
25678 --- linux-2.6.32.42/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
25679 +++ linux-2.6.32.42/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
25680 @@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
25681 if ((vc = (vc_map *) vcc->dev_data) == NULL)
25682 {
25683 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
25684 - atomic_inc(&vcc->stats->tx_err);
25685 + atomic_inc_unchecked(&vcc->stats->tx_err);
25686 dev_kfree_skb_any(skb);
25687 return -EINVAL;
25688 }
25689 @@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
25690 if (!vc->tx)
25691 {
25692 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
25693 - atomic_inc(&vcc->stats->tx_err);
25694 + atomic_inc_unchecked(&vcc->stats->tx_err);
25695 dev_kfree_skb_any(skb);
25696 return -EINVAL;
25697 }
25698 @@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
25699 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
25700 {
25701 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
25702 - atomic_inc(&vcc->stats->tx_err);
25703 + atomic_inc_unchecked(&vcc->stats->tx_err);
25704 dev_kfree_skb_any(skb);
25705 return -EINVAL;
25706 }
25707 @@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
25708 if (skb_shinfo(skb)->nr_frags != 0)
25709 {
25710 printk("nicstar%d: No scatter-gather yet.\n", card->index);
25711 - atomic_inc(&vcc->stats->tx_err);
25712 + atomic_inc_unchecked(&vcc->stats->tx_err);
25713 dev_kfree_skb_any(skb);
25714 return -EINVAL;
25715 }
25716 @@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
25717
25718 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
25719 {
25720 - atomic_inc(&vcc->stats->tx_err);
25721 + atomic_inc_unchecked(&vcc->stats->tx_err);
25722 dev_kfree_skb_any(skb);
25723 return -EIO;
25724 }
25725 - atomic_inc(&vcc->stats->tx);
25726 + atomic_inc_unchecked(&vcc->stats->tx);
25727
25728 return 0;
25729 }
25730 @@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
25731 {
25732 printk("nicstar%d: Can't allocate buffers for aal0.\n",
25733 card->index);
25734 - atomic_add(i,&vcc->stats->rx_drop);
25735 + atomic_add_unchecked(i,&vcc->stats->rx_drop);
25736 break;
25737 }
25738 if (!atm_charge(vcc, sb->truesize))
25739 {
25740 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
25741 card->index);
25742 - atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
25743 + atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
25744 dev_kfree_skb_any(sb);
25745 break;
25746 }
25747 @@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
25748 ATM_SKB(sb)->vcc = vcc;
25749 __net_timestamp(sb);
25750 vcc->push(vcc, sb);
25751 - atomic_inc(&vcc->stats->rx);
25752 + atomic_inc_unchecked(&vcc->stats->rx);
25753 cell += ATM_CELL_PAYLOAD;
25754 }
25755
25756 @@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
25757 if (iovb == NULL)
25758 {
25759 printk("nicstar%d: Out of iovec buffers.\n", card->index);
25760 - atomic_inc(&vcc->stats->rx_drop);
25761 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25762 recycle_rx_buf(card, skb);
25763 return;
25764 }
25765 @@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
25766 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
25767 {
25768 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
25769 - atomic_inc(&vcc->stats->rx_err);
25770 + atomic_inc_unchecked(&vcc->stats->rx_err);
25771 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
25772 NS_SKB(iovb)->iovcnt = 0;
25773 iovb->len = 0;
25774 @@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
25775 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
25776 card->index);
25777 which_list(card, skb);
25778 - atomic_inc(&vcc->stats->rx_err);
25779 + atomic_inc_unchecked(&vcc->stats->rx_err);
25780 recycle_rx_buf(card, skb);
25781 vc->rx_iov = NULL;
25782 recycle_iov_buf(card, iovb);
25783 @@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
25784 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
25785 card->index);
25786 which_list(card, skb);
25787 - atomic_inc(&vcc->stats->rx_err);
25788 + atomic_inc_unchecked(&vcc->stats->rx_err);
25789 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25790 NS_SKB(iovb)->iovcnt);
25791 vc->rx_iov = NULL;
25792 @@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
25793 printk(" - PDU size mismatch.\n");
25794 else
25795 printk(".\n");
25796 - atomic_inc(&vcc->stats->rx_err);
25797 + atomic_inc_unchecked(&vcc->stats->rx_err);
25798 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25799 NS_SKB(iovb)->iovcnt);
25800 vc->rx_iov = NULL;
25801 @@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
25802 if (!atm_charge(vcc, skb->truesize))
25803 {
25804 push_rxbufs(card, skb);
25805 - atomic_inc(&vcc->stats->rx_drop);
25806 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25807 }
25808 else
25809 {
25810 @@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
25811 ATM_SKB(skb)->vcc = vcc;
25812 __net_timestamp(skb);
25813 vcc->push(vcc, skb);
25814 - atomic_inc(&vcc->stats->rx);
25815 + atomic_inc_unchecked(&vcc->stats->rx);
25816 }
25817 }
25818 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
25819 @@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
25820 if (!atm_charge(vcc, sb->truesize))
25821 {
25822 push_rxbufs(card, sb);
25823 - atomic_inc(&vcc->stats->rx_drop);
25824 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25825 }
25826 else
25827 {
25828 @@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
25829 ATM_SKB(sb)->vcc = vcc;
25830 __net_timestamp(sb);
25831 vcc->push(vcc, sb);
25832 - atomic_inc(&vcc->stats->rx);
25833 + atomic_inc_unchecked(&vcc->stats->rx);
25834 }
25835
25836 push_rxbufs(card, skb);
25837 @@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
25838 if (!atm_charge(vcc, skb->truesize))
25839 {
25840 push_rxbufs(card, skb);
25841 - atomic_inc(&vcc->stats->rx_drop);
25842 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25843 }
25844 else
25845 {
25846 @@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
25847 ATM_SKB(skb)->vcc = vcc;
25848 __net_timestamp(skb);
25849 vcc->push(vcc, skb);
25850 - atomic_inc(&vcc->stats->rx);
25851 + atomic_inc_unchecked(&vcc->stats->rx);
25852 }
25853
25854 push_rxbufs(card, sb);
25855 @@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
25856 if (hb == NULL)
25857 {
25858 printk("nicstar%d: Out of huge buffers.\n", card->index);
25859 - atomic_inc(&vcc->stats->rx_drop);
25860 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25861 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
25862 NS_SKB(iovb)->iovcnt);
25863 vc->rx_iov = NULL;
25864 @@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
25865 }
25866 else
25867 dev_kfree_skb_any(hb);
25868 - atomic_inc(&vcc->stats->rx_drop);
25869 + atomic_inc_unchecked(&vcc->stats->rx_drop);
25870 }
25871 else
25872 {
25873 @@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
25874 #endif /* NS_USE_DESTRUCTORS */
25875 __net_timestamp(hb);
25876 vcc->push(vcc, hb);
25877 - atomic_inc(&vcc->stats->rx);
25878 + atomic_inc_unchecked(&vcc->stats->rx);
25879 }
25880 }
25881
25882 diff -urNp linux-2.6.32.42/drivers/atm/solos-pci.c linux-2.6.32.42/drivers/atm/solos-pci.c
25883 --- linux-2.6.32.42/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
25884 +++ linux-2.6.32.42/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
25885 @@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
25886 }
25887 atm_charge(vcc, skb->truesize);
25888 vcc->push(vcc, skb);
25889 - atomic_inc(&vcc->stats->rx);
25890 + atomic_inc_unchecked(&vcc->stats->rx);
25891 break;
25892
25893 case PKT_STATUS:
25894 @@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
25895 char msg[500];
25896 char item[10];
25897
25898 + pax_track_stack();
25899 +
25900 len = buf->len;
25901 for (i = 0; i < len; i++){
25902 if(i % 8 == 0)
25903 @@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
25904 vcc = SKB_CB(oldskb)->vcc;
25905
25906 if (vcc) {
25907 - atomic_inc(&vcc->stats->tx);
25908 + atomic_inc_unchecked(&vcc->stats->tx);
25909 solos_pop(vcc, oldskb);
25910 } else
25911 dev_kfree_skb_irq(oldskb);
25912 diff -urNp linux-2.6.32.42/drivers/atm/suni.c linux-2.6.32.42/drivers/atm/suni.c
25913 --- linux-2.6.32.42/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
25914 +++ linux-2.6.32.42/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
25915 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
25916
25917
25918 #define ADD_LIMITED(s,v) \
25919 - atomic_add((v),&stats->s); \
25920 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
25921 + atomic_add_unchecked((v),&stats->s); \
25922 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
25923
25924
25925 static void suni_hz(unsigned long from_timer)
25926 diff -urNp linux-2.6.32.42/drivers/atm/uPD98402.c linux-2.6.32.42/drivers/atm/uPD98402.c
25927 --- linux-2.6.32.42/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
25928 +++ linux-2.6.32.42/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
25929 @@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
25930 struct sonet_stats tmp;
25931 int error = 0;
25932
25933 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
25934 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
25935 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
25936 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
25937 if (zero && !error) {
25938 @@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
25939
25940
25941 #define ADD_LIMITED(s,v) \
25942 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
25943 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
25944 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
25945 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
25946 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
25947 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
25948
25949
25950 static void stat_event(struct atm_dev *dev)
25951 @@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
25952 if (reason & uPD98402_INT_PFM) stat_event(dev);
25953 if (reason & uPD98402_INT_PCO) {
25954 (void) GET(PCOCR); /* clear interrupt cause */
25955 - atomic_add(GET(HECCT),
25956 + atomic_add_unchecked(GET(HECCT),
25957 &PRIV(dev)->sonet_stats.uncorr_hcs);
25958 }
25959 if ((reason & uPD98402_INT_RFO) &&
25960 @@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
25961 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
25962 uPD98402_INT_LOS),PIMR); /* enable them */
25963 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
25964 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
25965 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
25966 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
25967 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
25968 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
25969 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
25970 return 0;
25971 }
25972
25973 diff -urNp linux-2.6.32.42/drivers/atm/zatm.c linux-2.6.32.42/drivers/atm/zatm.c
25974 --- linux-2.6.32.42/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
25975 +++ linux-2.6.32.42/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
25976 @@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
25977 }
25978 if (!size) {
25979 dev_kfree_skb_irq(skb);
25980 - if (vcc) atomic_inc(&vcc->stats->rx_err);
25981 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
25982 continue;
25983 }
25984 if (!atm_charge(vcc,skb->truesize)) {
25985 @@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
25986 skb->len = size;
25987 ATM_SKB(skb)->vcc = vcc;
25988 vcc->push(vcc,skb);
25989 - atomic_inc(&vcc->stats->rx);
25990 + atomic_inc_unchecked(&vcc->stats->rx);
25991 }
25992 zout(pos & 0xffff,MTA(mbx));
25993 #if 0 /* probably a stupid idea */
25994 @@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
25995 skb_queue_head(&zatm_vcc->backlog,skb);
25996 break;
25997 }
25998 - atomic_inc(&vcc->stats->tx);
25999 + atomic_inc_unchecked(&vcc->stats->tx);
26000 wake_up(&zatm_vcc->tx_wait);
26001 }
26002
26003 diff -urNp linux-2.6.32.42/drivers/base/bus.c linux-2.6.32.42/drivers/base/bus.c
26004 --- linux-2.6.32.42/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
26005 +++ linux-2.6.32.42/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
26006 @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
26007 return ret;
26008 }
26009
26010 -static struct sysfs_ops driver_sysfs_ops = {
26011 +static const struct sysfs_ops driver_sysfs_ops = {
26012 .show = drv_attr_show,
26013 .store = drv_attr_store,
26014 };
26015 @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
26016 return ret;
26017 }
26018
26019 -static struct sysfs_ops bus_sysfs_ops = {
26020 +static const struct sysfs_ops bus_sysfs_ops = {
26021 .show = bus_attr_show,
26022 .store = bus_attr_store,
26023 };
26024 @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
26025 return 0;
26026 }
26027
26028 -static struct kset_uevent_ops bus_uevent_ops = {
26029 +static const struct kset_uevent_ops bus_uevent_ops = {
26030 .filter = bus_uevent_filter,
26031 };
26032
26033 diff -urNp linux-2.6.32.42/drivers/base/class.c linux-2.6.32.42/drivers/base/class.c
26034 --- linux-2.6.32.42/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
26035 +++ linux-2.6.32.42/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
26036 @@ -63,7 +63,7 @@ static void class_release(struct kobject
26037 kfree(cp);
26038 }
26039
26040 -static struct sysfs_ops class_sysfs_ops = {
26041 +static const struct sysfs_ops class_sysfs_ops = {
26042 .show = class_attr_show,
26043 .store = class_attr_store,
26044 };
26045 diff -urNp linux-2.6.32.42/drivers/base/core.c linux-2.6.32.42/drivers/base/core.c
26046 --- linux-2.6.32.42/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
26047 +++ linux-2.6.32.42/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
26048 @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
26049 return ret;
26050 }
26051
26052 -static struct sysfs_ops dev_sysfs_ops = {
26053 +static const struct sysfs_ops dev_sysfs_ops = {
26054 .show = dev_attr_show,
26055 .store = dev_attr_store,
26056 };
26057 @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
26058 return retval;
26059 }
26060
26061 -static struct kset_uevent_ops device_uevent_ops = {
26062 +static const struct kset_uevent_ops device_uevent_ops = {
26063 .filter = dev_uevent_filter,
26064 .name = dev_uevent_name,
26065 .uevent = dev_uevent,
26066 diff -urNp linux-2.6.32.42/drivers/base/memory.c linux-2.6.32.42/drivers/base/memory.c
26067 --- linux-2.6.32.42/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
26068 +++ linux-2.6.32.42/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
26069 @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
26070 return retval;
26071 }
26072
26073 -static struct kset_uevent_ops memory_uevent_ops = {
26074 +static const struct kset_uevent_ops memory_uevent_ops = {
26075 .name = memory_uevent_name,
26076 .uevent = memory_uevent,
26077 };
26078 diff -urNp linux-2.6.32.42/drivers/base/sys.c linux-2.6.32.42/drivers/base/sys.c
26079 --- linux-2.6.32.42/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
26080 +++ linux-2.6.32.42/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
26081 @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
26082 return -EIO;
26083 }
26084
26085 -static struct sysfs_ops sysfs_ops = {
26086 +static const struct sysfs_ops sysfs_ops = {
26087 .show = sysdev_show,
26088 .store = sysdev_store,
26089 };
26090 @@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
26091 return -EIO;
26092 }
26093
26094 -static struct sysfs_ops sysfs_class_ops = {
26095 +static const struct sysfs_ops sysfs_class_ops = {
26096 .show = sysdev_class_show,
26097 .store = sysdev_class_store,
26098 };
26099 diff -urNp linux-2.6.32.42/drivers/block/cciss.c linux-2.6.32.42/drivers/block/cciss.c
26100 --- linux-2.6.32.42/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
26101 +++ linux-2.6.32.42/drivers/block/cciss.c 2011-04-17 15:56:46.000000000 -0400
26102 @@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
26103 int err;
26104 u32 cp;
26105
26106 + memset(&arg64, 0, sizeof(arg64));
26107 +
26108 err = 0;
26109 err |=
26110 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26111 diff -urNp linux-2.6.32.42/drivers/block/cpqarray.c linux-2.6.32.42/drivers/block/cpqarray.c
26112 --- linux-2.6.32.42/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
26113 +++ linux-2.6.32.42/drivers/block/cpqarray.c 2011-05-16 21:46:57.000000000 -0400
26114 @@ -896,6 +896,8 @@ static void do_ida_request(struct reques
26115 struct scatterlist tmp_sg[SG_MAX];
26116 int i, dir, seg;
26117
26118 + pax_track_stack();
26119 +
26120 if (blk_queue_plugged(q))
26121 goto startio;
26122
26123 diff -urNp linux-2.6.32.42/drivers/block/DAC960.c linux-2.6.32.42/drivers/block/DAC960.c
26124 --- linux-2.6.32.42/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
26125 +++ linux-2.6.32.42/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
26126 @@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
26127 unsigned long flags;
26128 int Channel, TargetID;
26129
26130 + pax_track_stack();
26131 +
26132 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
26133 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
26134 sizeof(DAC960_SCSI_Inquiry_T) +
26135 diff -urNp linux-2.6.32.42/drivers/block/nbd.c linux-2.6.32.42/drivers/block/nbd.c
26136 --- linux-2.6.32.42/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
26137 +++ linux-2.6.32.42/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
26138 @@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
26139 struct kvec iov;
26140 sigset_t blocked, oldset;
26141
26142 + pax_track_stack();
26143 +
26144 if (unlikely(!sock)) {
26145 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
26146 lo->disk->disk_name, (send ? "send" : "recv"));
26147 @@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
26148 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
26149 unsigned int cmd, unsigned long arg)
26150 {
26151 + pax_track_stack();
26152 +
26153 switch (cmd) {
26154 case NBD_DISCONNECT: {
26155 struct request sreq;
26156 diff -urNp linux-2.6.32.42/drivers/block/pktcdvd.c linux-2.6.32.42/drivers/block/pktcdvd.c
26157 --- linux-2.6.32.42/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
26158 +++ linux-2.6.32.42/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
26159 @@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
26160 return len;
26161 }
26162
26163 -static struct sysfs_ops kobj_pkt_ops = {
26164 +static const struct sysfs_ops kobj_pkt_ops = {
26165 .show = kobj_pkt_show,
26166 .store = kobj_pkt_store
26167 };
26168 diff -urNp linux-2.6.32.42/drivers/char/agp/frontend.c linux-2.6.32.42/drivers/char/agp/frontend.c
26169 --- linux-2.6.32.42/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
26170 +++ linux-2.6.32.42/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
26171 @@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
26172 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26173 return -EFAULT;
26174
26175 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26176 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26177 return -EFAULT;
26178
26179 client = agp_find_client_by_pid(reserve.pid);
26180 diff -urNp linux-2.6.32.42/drivers/char/briq_panel.c linux-2.6.32.42/drivers/char/briq_panel.c
26181 --- linux-2.6.32.42/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
26182 +++ linux-2.6.32.42/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
26183 @@ -10,6 +10,7 @@
26184 #include <linux/types.h>
26185 #include <linux/errno.h>
26186 #include <linux/tty.h>
26187 +#include <linux/mutex.h>
26188 #include <linux/timer.h>
26189 #include <linux/kernel.h>
26190 #include <linux/wait.h>
26191 @@ -36,6 +37,7 @@ static int vfd_is_open;
26192 static unsigned char vfd[40];
26193 static int vfd_cursor;
26194 static unsigned char ledpb, led;
26195 +static DEFINE_MUTEX(vfd_mutex);
26196
26197 static void update_vfd(void)
26198 {
26199 @@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
26200 if (!vfd_is_open)
26201 return -EBUSY;
26202
26203 + mutex_lock(&vfd_mutex);
26204 for (;;) {
26205 char c;
26206 if (!indx)
26207 break;
26208 - if (get_user(c, buf))
26209 + if (get_user(c, buf)) {
26210 + mutex_unlock(&vfd_mutex);
26211 return -EFAULT;
26212 + }
26213 if (esc) {
26214 set_led(c);
26215 esc = 0;
26216 @@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
26217 buf++;
26218 }
26219 update_vfd();
26220 + mutex_unlock(&vfd_mutex);
26221
26222 return len;
26223 }
26224 diff -urNp linux-2.6.32.42/drivers/char/genrtc.c linux-2.6.32.42/drivers/char/genrtc.c
26225 --- linux-2.6.32.42/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
26226 +++ linux-2.6.32.42/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
26227 @@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
26228 switch (cmd) {
26229
26230 case RTC_PLL_GET:
26231 + memset(&pll, 0, sizeof(pll));
26232 if (get_rtc_pll(&pll))
26233 return -EINVAL;
26234 else
26235 diff -urNp linux-2.6.32.42/drivers/char/hpet.c linux-2.6.32.42/drivers/char/hpet.c
26236 --- linux-2.6.32.42/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
26237 +++ linux-2.6.32.42/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
26238 @@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
26239 return 0;
26240 }
26241
26242 -static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
26243 +static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
26244
26245 static int
26246 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
26247 @@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
26248 }
26249
26250 static int
26251 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
26252 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
26253 {
26254 struct hpet_timer __iomem *timer;
26255 struct hpet __iomem *hpet;
26256 @@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
26257 {
26258 struct hpet_info info;
26259
26260 + memset(&info, 0, sizeof(info));
26261 +
26262 if (devp->hd_ireqfreq)
26263 info.hi_ireqfreq =
26264 hpet_time_div(hpetp, devp->hd_ireqfreq);
26265 - else
26266 - info.hi_ireqfreq = 0;
26267 info.hi_flags =
26268 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
26269 info.hi_hpet = hpetp->hp_which;
26270 diff -urNp linux-2.6.32.42/drivers/char/hvc_beat.c linux-2.6.32.42/drivers/char/hvc_beat.c
26271 --- linux-2.6.32.42/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
26272 +++ linux-2.6.32.42/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
26273 @@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
26274 return cnt;
26275 }
26276
26277 -static struct hv_ops hvc_beat_get_put_ops = {
26278 +static const struct hv_ops hvc_beat_get_put_ops = {
26279 .get_chars = hvc_beat_get_chars,
26280 .put_chars = hvc_beat_put_chars,
26281 };
26282 diff -urNp linux-2.6.32.42/drivers/char/hvc_console.c linux-2.6.32.42/drivers/char/hvc_console.c
26283 --- linux-2.6.32.42/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
26284 +++ linux-2.6.32.42/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
26285 @@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
26286 * console interfaces but can still be used as a tty device. This has to be
26287 * static because kmalloc will not work during early console init.
26288 */
26289 -static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26290 +static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26291 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
26292 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
26293
26294 @@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
26295 * vty adapters do NOT get an hvc_instantiate() callback since they
26296 * appear after early console init.
26297 */
26298 -int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
26299 +int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
26300 {
26301 struct hvc_struct *hp;
26302
26303 @@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
26304 };
26305
26306 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
26307 - struct hv_ops *ops, int outbuf_size)
26308 + const struct hv_ops *ops, int outbuf_size)
26309 {
26310 struct hvc_struct *hp;
26311 int i;
26312 diff -urNp linux-2.6.32.42/drivers/char/hvc_console.h linux-2.6.32.42/drivers/char/hvc_console.h
26313 --- linux-2.6.32.42/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
26314 +++ linux-2.6.32.42/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
26315 @@ -55,7 +55,7 @@ struct hvc_struct {
26316 int outbuf_size;
26317 int n_outbuf;
26318 uint32_t vtermno;
26319 - struct hv_ops *ops;
26320 + const struct hv_ops *ops;
26321 int irq_requested;
26322 int data;
26323 struct winsize ws;
26324 @@ -76,11 +76,11 @@ struct hv_ops {
26325 };
26326
26327 /* Register a vterm and a slot index for use as a console (console_init) */
26328 -extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
26329 +extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
26330
26331 /* register a vterm for hvc tty operation (module_init or hotplug add) */
26332 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
26333 - struct hv_ops *ops, int outbuf_size);
26334 + const struct hv_ops *ops, int outbuf_size);
26335 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
26336 extern int hvc_remove(struct hvc_struct *hp);
26337
26338 diff -urNp linux-2.6.32.42/drivers/char/hvc_iseries.c linux-2.6.32.42/drivers/char/hvc_iseries.c
26339 --- linux-2.6.32.42/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
26340 +++ linux-2.6.32.42/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
26341 @@ -197,7 +197,7 @@ done:
26342 return sent;
26343 }
26344
26345 -static struct hv_ops hvc_get_put_ops = {
26346 +static const struct hv_ops hvc_get_put_ops = {
26347 .get_chars = get_chars,
26348 .put_chars = put_chars,
26349 .notifier_add = notifier_add_irq,
26350 diff -urNp linux-2.6.32.42/drivers/char/hvc_iucv.c linux-2.6.32.42/drivers/char/hvc_iucv.c
26351 --- linux-2.6.32.42/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
26352 +++ linux-2.6.32.42/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
26353 @@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
26354
26355
26356 /* HVC operations */
26357 -static struct hv_ops hvc_iucv_ops = {
26358 +static const struct hv_ops hvc_iucv_ops = {
26359 .get_chars = hvc_iucv_get_chars,
26360 .put_chars = hvc_iucv_put_chars,
26361 .notifier_add = hvc_iucv_notifier_add,
26362 diff -urNp linux-2.6.32.42/drivers/char/hvc_rtas.c linux-2.6.32.42/drivers/char/hvc_rtas.c
26363 --- linux-2.6.32.42/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
26364 +++ linux-2.6.32.42/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
26365 @@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
26366 return i;
26367 }
26368
26369 -static struct hv_ops hvc_rtas_get_put_ops = {
26370 +static const struct hv_ops hvc_rtas_get_put_ops = {
26371 .get_chars = hvc_rtas_read_console,
26372 .put_chars = hvc_rtas_write_console,
26373 };
26374 diff -urNp linux-2.6.32.42/drivers/char/hvcs.c linux-2.6.32.42/drivers/char/hvcs.c
26375 --- linux-2.6.32.42/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
26376 +++ linux-2.6.32.42/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
26377 @@ -82,6 +82,7 @@
26378 #include <asm/hvcserver.h>
26379 #include <asm/uaccess.h>
26380 #include <asm/vio.h>
26381 +#include <asm/local.h>
26382
26383 /*
26384 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
26385 @@ -269,7 +270,7 @@ struct hvcs_struct {
26386 unsigned int index;
26387
26388 struct tty_struct *tty;
26389 - int open_count;
26390 + local_t open_count;
26391
26392 /*
26393 * Used to tell the driver kernel_thread what operations need to take
26394 @@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
26395
26396 spin_lock_irqsave(&hvcsd->lock, flags);
26397
26398 - if (hvcsd->open_count > 0) {
26399 + if (local_read(&hvcsd->open_count) > 0) {
26400 spin_unlock_irqrestore(&hvcsd->lock, flags);
26401 printk(KERN_INFO "HVCS: vterm state unchanged. "
26402 "The hvcs device node is still in use.\n");
26403 @@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
26404 if ((retval = hvcs_partner_connect(hvcsd)))
26405 goto error_release;
26406
26407 - hvcsd->open_count = 1;
26408 + local_set(&hvcsd->open_count, 1);
26409 hvcsd->tty = tty;
26410 tty->driver_data = hvcsd;
26411
26412 @@ -1169,7 +1170,7 @@ fast_open:
26413
26414 spin_lock_irqsave(&hvcsd->lock, flags);
26415 kref_get(&hvcsd->kref);
26416 - hvcsd->open_count++;
26417 + local_inc(&hvcsd->open_count);
26418 hvcsd->todo_mask |= HVCS_SCHED_READ;
26419 spin_unlock_irqrestore(&hvcsd->lock, flags);
26420
26421 @@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
26422 hvcsd = tty->driver_data;
26423
26424 spin_lock_irqsave(&hvcsd->lock, flags);
26425 - if (--hvcsd->open_count == 0) {
26426 + if (local_dec_and_test(&hvcsd->open_count)) {
26427
26428 vio_disable_interrupts(hvcsd->vdev);
26429
26430 @@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
26431 free_irq(irq, hvcsd);
26432 kref_put(&hvcsd->kref, destroy_hvcs_struct);
26433 return;
26434 - } else if (hvcsd->open_count < 0) {
26435 + } else if (local_read(&hvcsd->open_count) < 0) {
26436 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
26437 " is missmanaged.\n",
26438 - hvcsd->vdev->unit_address, hvcsd->open_count);
26439 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
26440 }
26441
26442 spin_unlock_irqrestore(&hvcsd->lock, flags);
26443 @@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
26444
26445 spin_lock_irqsave(&hvcsd->lock, flags);
26446 /* Preserve this so that we know how many kref refs to put */
26447 - temp_open_count = hvcsd->open_count;
26448 + temp_open_count = local_read(&hvcsd->open_count);
26449
26450 /*
26451 * Don't kref put inside the spinlock because the destruction
26452 @@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
26453 hvcsd->tty->driver_data = NULL;
26454 hvcsd->tty = NULL;
26455
26456 - hvcsd->open_count = 0;
26457 + local_set(&hvcsd->open_count, 0);
26458
26459 /* This will drop any buffered data on the floor which is OK in a hangup
26460 * scenario. */
26461 @@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
26462 * the middle of a write operation? This is a crummy place to do this
26463 * but we want to keep it all in the spinlock.
26464 */
26465 - if (hvcsd->open_count <= 0) {
26466 + if (local_read(&hvcsd->open_count) <= 0) {
26467 spin_unlock_irqrestore(&hvcsd->lock, flags);
26468 return -ENODEV;
26469 }
26470 @@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
26471 {
26472 struct hvcs_struct *hvcsd = tty->driver_data;
26473
26474 - if (!hvcsd || hvcsd->open_count <= 0)
26475 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
26476 return 0;
26477
26478 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
26479 diff -urNp linux-2.6.32.42/drivers/char/hvc_udbg.c linux-2.6.32.42/drivers/char/hvc_udbg.c
26480 --- linux-2.6.32.42/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
26481 +++ linux-2.6.32.42/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
26482 @@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
26483 return i;
26484 }
26485
26486 -static struct hv_ops hvc_udbg_ops = {
26487 +static const struct hv_ops hvc_udbg_ops = {
26488 .get_chars = hvc_udbg_get,
26489 .put_chars = hvc_udbg_put,
26490 };
26491 diff -urNp linux-2.6.32.42/drivers/char/hvc_vio.c linux-2.6.32.42/drivers/char/hvc_vio.c
26492 --- linux-2.6.32.42/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
26493 +++ linux-2.6.32.42/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
26494 @@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
26495 return got;
26496 }
26497
26498 -static struct hv_ops hvc_get_put_ops = {
26499 +static const struct hv_ops hvc_get_put_ops = {
26500 .get_chars = filtered_get_chars,
26501 .put_chars = hvc_put_chars,
26502 .notifier_add = notifier_add_irq,
26503 diff -urNp linux-2.6.32.42/drivers/char/hvc_xen.c linux-2.6.32.42/drivers/char/hvc_xen.c
26504 --- linux-2.6.32.42/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
26505 +++ linux-2.6.32.42/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
26506 @@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
26507 return recv;
26508 }
26509
26510 -static struct hv_ops hvc_ops = {
26511 +static const struct hv_ops hvc_ops = {
26512 .get_chars = read_console,
26513 .put_chars = write_console,
26514 .notifier_add = notifier_add_irq,
26515 diff -urNp linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c
26516 --- linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
26517 +++ linux-2.6.32.42/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
26518 @@ -414,7 +414,7 @@ struct ipmi_smi {
26519 struct proc_dir_entry *proc_dir;
26520 char proc_dir_name[10];
26521
26522 - atomic_t stats[IPMI_NUM_STATS];
26523 + atomic_unchecked_t stats[IPMI_NUM_STATS];
26524
26525 /*
26526 * run_to_completion duplicate of smb_info, smi_info
26527 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
26528
26529
26530 #define ipmi_inc_stat(intf, stat) \
26531 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
26532 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
26533 #define ipmi_get_stat(intf, stat) \
26534 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
26535 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
26536
26537 static int is_lan_addr(struct ipmi_addr *addr)
26538 {
26539 @@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
26540 INIT_LIST_HEAD(&intf->cmd_rcvrs);
26541 init_waitqueue_head(&intf->waitq);
26542 for (i = 0; i < IPMI_NUM_STATS; i++)
26543 - atomic_set(&intf->stats[i], 0);
26544 + atomic_set_unchecked(&intf->stats[i], 0);
26545
26546 intf->proc_dir = NULL;
26547
26548 @@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
26549 struct ipmi_smi_msg smi_msg;
26550 struct ipmi_recv_msg recv_msg;
26551
26552 + pax_track_stack();
26553 +
26554 si = (struct ipmi_system_interface_addr *) &addr;
26555 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
26556 si->channel = IPMI_BMC_CHANNEL;
26557 diff -urNp linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c
26558 --- linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
26559 +++ linux-2.6.32.42/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
26560 @@ -277,7 +277,7 @@ struct smi_info {
26561 unsigned char slave_addr;
26562
26563 /* Counters and things for the proc filesystem. */
26564 - atomic_t stats[SI_NUM_STATS];
26565 + atomic_unchecked_t stats[SI_NUM_STATS];
26566
26567 struct task_struct *thread;
26568
26569 @@ -285,9 +285,9 @@ struct smi_info {
26570 };
26571
26572 #define smi_inc_stat(smi, stat) \
26573 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
26574 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
26575 #define smi_get_stat(smi, stat) \
26576 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
26577 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
26578
26579 #define SI_MAX_PARMS 4
26580
26581 @@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
26582 atomic_set(&new_smi->req_events, 0);
26583 new_smi->run_to_completion = 0;
26584 for (i = 0; i < SI_NUM_STATS; i++)
26585 - atomic_set(&new_smi->stats[i], 0);
26586 + atomic_set_unchecked(&new_smi->stats[i], 0);
26587
26588 new_smi->interrupt_disabled = 0;
26589 atomic_set(&new_smi->stop_operation, 0);
26590 diff -urNp linux-2.6.32.42/drivers/char/istallion.c linux-2.6.32.42/drivers/char/istallion.c
26591 --- linux-2.6.32.42/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
26592 +++ linux-2.6.32.42/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
26593 @@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
26594 * re-used for each stats call.
26595 */
26596 static comstats_t stli_comstats;
26597 -static combrd_t stli_brdstats;
26598 static struct asystats stli_cdkstats;
26599
26600 /*****************************************************************************/
26601 @@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
26602 {
26603 struct stlibrd *brdp;
26604 unsigned int i;
26605 + combrd_t stli_brdstats;
26606
26607 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
26608 return -EFAULT;
26609 @@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
26610 struct stliport stli_dummyport;
26611 struct stliport *portp;
26612
26613 + pax_track_stack();
26614 +
26615 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
26616 return -EFAULT;
26617 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
26618 @@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
26619 struct stlibrd stli_dummybrd;
26620 struct stlibrd *brdp;
26621
26622 + pax_track_stack();
26623 +
26624 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
26625 return -EFAULT;
26626 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
26627 diff -urNp linux-2.6.32.42/drivers/char/Kconfig linux-2.6.32.42/drivers/char/Kconfig
26628 --- linux-2.6.32.42/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
26629 +++ linux-2.6.32.42/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
26630 @@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
26631
26632 config DEVKMEM
26633 bool "/dev/kmem virtual device support"
26634 - default y
26635 + default n
26636 + depends on !GRKERNSEC_KMEM
26637 help
26638 Say Y here if you want to support the /dev/kmem device. The
26639 /dev/kmem device is rarely used, but can be used for certain
26640 @@ -1114,6 +1115,7 @@ config DEVPORT
26641 bool
26642 depends on !M68K
26643 depends on ISA || PCI
26644 + depends on !GRKERNSEC_KMEM
26645 default y
26646
26647 source "drivers/s390/char/Kconfig"
26648 diff -urNp linux-2.6.32.42/drivers/char/keyboard.c linux-2.6.32.42/drivers/char/keyboard.c
26649 --- linux-2.6.32.42/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
26650 +++ linux-2.6.32.42/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
26651 @@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
26652 kbd->kbdmode == VC_MEDIUMRAW) &&
26653 value != KVAL(K_SAK))
26654 return; /* SAK is allowed even in raw mode */
26655 +
26656 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
26657 + {
26658 + void *func = fn_handler[value];
26659 + if (func == fn_show_state || func == fn_show_ptregs ||
26660 + func == fn_show_mem)
26661 + return;
26662 + }
26663 +#endif
26664 +
26665 fn_handler[value](vc);
26666 }
26667
26668 @@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
26669 .evbit = { BIT_MASK(EV_SND) },
26670 },
26671
26672 - { }, /* Terminating entry */
26673 + { 0 }, /* Terminating entry */
26674 };
26675
26676 MODULE_DEVICE_TABLE(input, kbd_ids);
26677 diff -urNp linux-2.6.32.42/drivers/char/mem.c linux-2.6.32.42/drivers/char/mem.c
26678 --- linux-2.6.32.42/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
26679 +++ linux-2.6.32.42/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
26680 @@ -18,6 +18,7 @@
26681 #include <linux/raw.h>
26682 #include <linux/tty.h>
26683 #include <linux/capability.h>
26684 +#include <linux/security.h>
26685 #include <linux/ptrace.h>
26686 #include <linux/device.h>
26687 #include <linux/highmem.h>
26688 @@ -35,6 +36,10 @@
26689 # include <linux/efi.h>
26690 #endif
26691
26692 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26693 +extern struct file_operations grsec_fops;
26694 +#endif
26695 +
26696 static inline unsigned long size_inside_page(unsigned long start,
26697 unsigned long size)
26698 {
26699 @@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
26700
26701 while (cursor < to) {
26702 if (!devmem_is_allowed(pfn)) {
26703 +#ifdef CONFIG_GRKERNSEC_KMEM
26704 + gr_handle_mem_readwrite(from, to);
26705 +#else
26706 printk(KERN_INFO
26707 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
26708 current->comm, from, to);
26709 +#endif
26710 return 0;
26711 }
26712 cursor += PAGE_SIZE;
26713 @@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
26714 }
26715 return 1;
26716 }
26717 +#elif defined(CONFIG_GRKERNSEC_KMEM)
26718 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26719 +{
26720 + return 0;
26721 +}
26722 #else
26723 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
26724 {
26725 @@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
26726 #endif
26727
26728 while (count > 0) {
26729 + char *temp;
26730 +
26731 /*
26732 * Handle first page in case it's not aligned
26733 */
26734 @@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
26735 if (!ptr)
26736 return -EFAULT;
26737
26738 - if (copy_to_user(buf, ptr, sz)) {
26739 +#ifdef CONFIG_PAX_USERCOPY
26740 + temp = kmalloc(sz, GFP_KERNEL);
26741 + if (!temp) {
26742 + unxlate_dev_mem_ptr(p, ptr);
26743 + return -ENOMEM;
26744 + }
26745 + memcpy(temp, ptr, sz);
26746 +#else
26747 + temp = ptr;
26748 +#endif
26749 +
26750 + if (copy_to_user(buf, temp, sz)) {
26751 +
26752 +#ifdef CONFIG_PAX_USERCOPY
26753 + kfree(temp);
26754 +#endif
26755 +
26756 unxlate_dev_mem_ptr(p, ptr);
26757 return -EFAULT;
26758 }
26759
26760 +#ifdef CONFIG_PAX_USERCOPY
26761 + kfree(temp);
26762 +#endif
26763 +
26764 unxlate_dev_mem_ptr(p, ptr);
26765
26766 buf += sz;
26767 @@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
26768 size_t count, loff_t *ppos)
26769 {
26770 unsigned long p = *ppos;
26771 - ssize_t low_count, read, sz;
26772 + ssize_t low_count, read, sz, err = 0;
26773 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
26774 - int err = 0;
26775
26776 read = 0;
26777 if (p < (unsigned long) high_memory) {
26778 @@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
26779 }
26780 #endif
26781 while (low_count > 0) {
26782 + char *temp;
26783 +
26784 sz = size_inside_page(p, low_count);
26785
26786 /*
26787 @@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
26788 */
26789 kbuf = xlate_dev_kmem_ptr((char *)p);
26790
26791 - if (copy_to_user(buf, kbuf, sz))
26792 +#ifdef CONFIG_PAX_USERCOPY
26793 + temp = kmalloc(sz, GFP_KERNEL);
26794 + if (!temp)
26795 + return -ENOMEM;
26796 + memcpy(temp, kbuf, sz);
26797 +#else
26798 + temp = kbuf;
26799 +#endif
26800 +
26801 + err = copy_to_user(buf, temp, sz);
26802 +
26803 +#ifdef CONFIG_PAX_USERCOPY
26804 + kfree(temp);
26805 +#endif
26806 +
26807 + if (err)
26808 return -EFAULT;
26809 buf += sz;
26810 p += sz;
26811 @@ -889,6 +941,9 @@ static const struct memdev {
26812 #ifdef CONFIG_CRASH_DUMP
26813 [12] = { "oldmem", 0, &oldmem_fops, NULL },
26814 #endif
26815 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
26816 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
26817 +#endif
26818 };
26819
26820 static int memory_open(struct inode *inode, struct file *filp)
26821 diff -urNp linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c
26822 --- linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
26823 +++ linux-2.6.32.42/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
26824 @@ -29,6 +29,7 @@
26825 #include <linux/tty_driver.h>
26826 #include <linux/tty_flip.h>
26827 #include <linux/uaccess.h>
26828 +#include <asm/local.h>
26829
26830 #include "tty.h"
26831 #include "network.h"
26832 @@ -51,7 +52,7 @@ struct ipw_tty {
26833 int tty_type;
26834 struct ipw_network *network;
26835 struct tty_struct *linux_tty;
26836 - int open_count;
26837 + local_t open_count;
26838 unsigned int control_lines;
26839 struct mutex ipw_tty_mutex;
26840 int tx_bytes_queued;
26841 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
26842 mutex_unlock(&tty->ipw_tty_mutex);
26843 return -ENODEV;
26844 }
26845 - if (tty->open_count == 0)
26846 + if (local_read(&tty->open_count) == 0)
26847 tty->tx_bytes_queued = 0;
26848
26849 - tty->open_count++;
26850 + local_inc(&tty->open_count);
26851
26852 tty->linux_tty = linux_tty;
26853 linux_tty->driver_data = tty;
26854 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
26855
26856 static void do_ipw_close(struct ipw_tty *tty)
26857 {
26858 - tty->open_count--;
26859 -
26860 - if (tty->open_count == 0) {
26861 + if (local_dec_return(&tty->open_count) == 0) {
26862 struct tty_struct *linux_tty = tty->linux_tty;
26863
26864 if (linux_tty != NULL) {
26865 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
26866 return;
26867
26868 mutex_lock(&tty->ipw_tty_mutex);
26869 - if (tty->open_count == 0) {
26870 + if (local_read(&tty->open_count) == 0) {
26871 mutex_unlock(&tty->ipw_tty_mutex);
26872 return;
26873 }
26874 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
26875 return;
26876 }
26877
26878 - if (!tty->open_count) {
26879 + if (!local_read(&tty->open_count)) {
26880 mutex_unlock(&tty->ipw_tty_mutex);
26881 return;
26882 }
26883 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
26884 return -ENODEV;
26885
26886 mutex_lock(&tty->ipw_tty_mutex);
26887 - if (!tty->open_count) {
26888 + if (!local_read(&tty->open_count)) {
26889 mutex_unlock(&tty->ipw_tty_mutex);
26890 return -EINVAL;
26891 }
26892 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
26893 if (!tty)
26894 return -ENODEV;
26895
26896 - if (!tty->open_count)
26897 + if (!local_read(&tty->open_count))
26898 return -EINVAL;
26899
26900 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
26901 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
26902 if (!tty)
26903 return 0;
26904
26905 - if (!tty->open_count)
26906 + if (!local_read(&tty->open_count))
26907 return 0;
26908
26909 return tty->tx_bytes_queued;
26910 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
26911 if (!tty)
26912 return -ENODEV;
26913
26914 - if (!tty->open_count)
26915 + if (!local_read(&tty->open_count))
26916 return -EINVAL;
26917
26918 return get_control_lines(tty);
26919 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
26920 if (!tty)
26921 return -ENODEV;
26922
26923 - if (!tty->open_count)
26924 + if (!local_read(&tty->open_count))
26925 return -EINVAL;
26926
26927 return set_control_lines(tty, set, clear);
26928 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
26929 if (!tty)
26930 return -ENODEV;
26931
26932 - if (!tty->open_count)
26933 + if (!local_read(&tty->open_count))
26934 return -EINVAL;
26935
26936 /* FIXME: Exactly how is the tty object locked here .. */
26937 @@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
26938 against a parallel ioctl etc */
26939 mutex_lock(&ttyj->ipw_tty_mutex);
26940 }
26941 - while (ttyj->open_count)
26942 + while (local_read(&ttyj->open_count))
26943 do_ipw_close(ttyj);
26944 ipwireless_disassociate_network_ttys(network,
26945 ttyj->channel_idx);
26946 diff -urNp linux-2.6.32.42/drivers/char/pty.c linux-2.6.32.42/drivers/char/pty.c
26947 --- linux-2.6.32.42/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
26948 +++ linux-2.6.32.42/drivers/char/pty.c 2011-04-17 15:56:46.000000000 -0400
26949 @@ -682,7 +682,18 @@ static int ptmx_open(struct inode *inode
26950 return ret;
26951 }
26952
26953 -static struct file_operations ptmx_fops;
26954 +static const struct file_operations ptmx_fops = {
26955 + .llseek = no_llseek,
26956 + .read = tty_read,
26957 + .write = tty_write,
26958 + .poll = tty_poll,
26959 + .unlocked_ioctl = tty_ioctl,
26960 + .compat_ioctl = tty_compat_ioctl,
26961 + .open = ptmx_open,
26962 + .release = tty_release,
26963 + .fasync = tty_fasync,
26964 +};
26965 +
26966
26967 static void __init unix98_pty_init(void)
26968 {
26969 @@ -736,9 +747,6 @@ static void __init unix98_pty_init(void)
26970 register_sysctl_table(pty_root_table);
26971
26972 /* Now create the /dev/ptmx special device */
26973 - tty_default_fops(&ptmx_fops);
26974 - ptmx_fops.open = ptmx_open;
26975 -
26976 cdev_init(&ptmx_cdev, &ptmx_fops);
26977 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
26978 register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
26979 diff -urNp linux-2.6.32.42/drivers/char/random.c linux-2.6.32.42/drivers/char/random.c
26980 --- linux-2.6.32.42/drivers/char/random.c 2011-03-27 14:31:47.000000000 -0400
26981 +++ linux-2.6.32.42/drivers/char/random.c 2011-04-17 15:56:46.000000000 -0400
26982 @@ -254,8 +254,13 @@
26983 /*
26984 * Configuration information
26985 */
26986 +#ifdef CONFIG_GRKERNSEC_RANDNET
26987 +#define INPUT_POOL_WORDS 512
26988 +#define OUTPUT_POOL_WORDS 128
26989 +#else
26990 #define INPUT_POOL_WORDS 128
26991 #define OUTPUT_POOL_WORDS 32
26992 +#endif
26993 #define SEC_XFER_SIZE 512
26994
26995 /*
26996 @@ -292,10 +297,17 @@ static struct poolinfo {
26997 int poolwords;
26998 int tap1, tap2, tap3, tap4, tap5;
26999 } poolinfo_table[] = {
27000 +#ifdef CONFIG_GRKERNSEC_RANDNET
27001 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27002 + { 512, 411, 308, 208, 104, 1 },
27003 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27004 + { 128, 103, 76, 51, 25, 1 },
27005 +#else
27006 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27007 { 128, 103, 76, 51, 25, 1 },
27008 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27009 { 32, 26, 20, 14, 7, 1 },
27010 +#endif
27011 #if 0
27012 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27013 { 2048, 1638, 1231, 819, 411, 1 },
27014 @@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27015 #include <linux/sysctl.h>
27016
27017 static int min_read_thresh = 8, min_write_thresh;
27018 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
27019 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27020 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27021 static char sysctl_bootid[16];
27022
27023 diff -urNp linux-2.6.32.42/drivers/char/rocket.c linux-2.6.32.42/drivers/char/rocket.c
27024 --- linux-2.6.32.42/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
27025 +++ linux-2.6.32.42/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
27026 @@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
27027 struct rocket_ports tmp;
27028 int board;
27029
27030 + pax_track_stack();
27031 +
27032 if (!retports)
27033 return -EFAULT;
27034 memset(&tmp, 0, sizeof (tmp));
27035 diff -urNp linux-2.6.32.42/drivers/char/sonypi.c linux-2.6.32.42/drivers/char/sonypi.c
27036 --- linux-2.6.32.42/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
27037 +++ linux-2.6.32.42/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
27038 @@ -55,6 +55,7 @@
27039 #include <asm/uaccess.h>
27040 #include <asm/io.h>
27041 #include <asm/system.h>
27042 +#include <asm/local.h>
27043
27044 #include <linux/sonypi.h>
27045
27046 @@ -491,7 +492,7 @@ static struct sonypi_device {
27047 spinlock_t fifo_lock;
27048 wait_queue_head_t fifo_proc_list;
27049 struct fasync_struct *fifo_async;
27050 - int open_count;
27051 + local_t open_count;
27052 int model;
27053 struct input_dev *input_jog_dev;
27054 struct input_dev *input_key_dev;
27055 @@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
27056 static int sonypi_misc_release(struct inode *inode, struct file *file)
27057 {
27058 mutex_lock(&sonypi_device.lock);
27059 - sonypi_device.open_count--;
27060 + local_dec(&sonypi_device.open_count);
27061 mutex_unlock(&sonypi_device.lock);
27062 return 0;
27063 }
27064 @@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
27065 lock_kernel();
27066 mutex_lock(&sonypi_device.lock);
27067 /* Flush input queue on first open */
27068 - if (!sonypi_device.open_count)
27069 + if (!local_read(&sonypi_device.open_count))
27070 kfifo_reset(sonypi_device.fifo);
27071 - sonypi_device.open_count++;
27072 + local_inc(&sonypi_device.open_count);
27073 mutex_unlock(&sonypi_device.lock);
27074 unlock_kernel();
27075 return 0;
27076 diff -urNp linux-2.6.32.42/drivers/char/stallion.c linux-2.6.32.42/drivers/char/stallion.c
27077 --- linux-2.6.32.42/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
27078 +++ linux-2.6.32.42/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
27079 @@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
27080 struct stlport stl_dummyport;
27081 struct stlport *portp;
27082
27083 + pax_track_stack();
27084 +
27085 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
27086 return -EFAULT;
27087 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
27088 diff -urNp linux-2.6.32.42/drivers/char/tpm/tpm_bios.c linux-2.6.32.42/drivers/char/tpm/tpm_bios.c
27089 --- linux-2.6.32.42/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
27090 +++ linux-2.6.32.42/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
27091 @@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
27092 event = addr;
27093
27094 if ((event->event_type == 0 && event->event_size == 0) ||
27095 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27096 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27097 return NULL;
27098
27099 return addr;
27100 @@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
27101 return NULL;
27102
27103 if ((event->event_type == 0 && event->event_size == 0) ||
27104 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27105 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27106 return NULL;
27107
27108 (*pos)++;
27109 @@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
27110 int i;
27111
27112 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27113 - seq_putc(m, data[i]);
27114 + if (!seq_putc(m, data[i]))
27115 + return -EFAULT;
27116
27117 return 0;
27118 }
27119 @@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
27120 log->bios_event_log_end = log->bios_event_log + len;
27121
27122 virt = acpi_os_map_memory(start, len);
27123 + if (!virt) {
27124 + kfree(log->bios_event_log);
27125 + log->bios_event_log = NULL;
27126 + return -EFAULT;
27127 + }
27128
27129 memcpy(log->bios_event_log, virt, len);
27130
27131 diff -urNp linux-2.6.32.42/drivers/char/tpm/tpm.c linux-2.6.32.42/drivers/char/tpm/tpm.c
27132 --- linux-2.6.32.42/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
27133 +++ linux-2.6.32.42/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
27134 @@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
27135 chip->vendor.req_complete_val)
27136 goto out_recv;
27137
27138 - if ((status == chip->vendor.req_canceled)) {
27139 + if (status == chip->vendor.req_canceled) {
27140 dev_err(chip->dev, "Operation Canceled\n");
27141 rc = -ECANCELED;
27142 goto out;
27143 @@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
27144
27145 struct tpm_chip *chip = dev_get_drvdata(dev);
27146
27147 + pax_track_stack();
27148 +
27149 tpm_cmd.header.in = tpm_readpubek_header;
27150 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
27151 "attempting to read the PUBEK");
27152 diff -urNp linux-2.6.32.42/drivers/char/tty_io.c linux-2.6.32.42/drivers/char/tty_io.c
27153 --- linux-2.6.32.42/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
27154 +++ linux-2.6.32.42/drivers/char/tty_io.c 2011-04-17 15:56:46.000000000 -0400
27155 @@ -136,21 +136,10 @@ LIST_HEAD(tty_drivers); /* linked list
27156 DEFINE_MUTEX(tty_mutex);
27157 EXPORT_SYMBOL(tty_mutex);
27158
27159 -static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
27160 -static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
27161 ssize_t redirected_tty_write(struct file *, const char __user *,
27162 size_t, loff_t *);
27163 -static unsigned int tty_poll(struct file *, poll_table *);
27164 static int tty_open(struct inode *, struct file *);
27165 -static int tty_release(struct inode *, struct file *);
27166 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
27167 -#ifdef CONFIG_COMPAT
27168 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27169 - unsigned long arg);
27170 -#else
27171 -#define tty_compat_ioctl NULL
27172 -#endif
27173 -static int tty_fasync(int fd, struct file *filp, int on);
27174 static void release_tty(struct tty_struct *tty, int idx);
27175 static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27176 static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27177 @@ -870,7 +859,7 @@ EXPORT_SYMBOL(start_tty);
27178 * read calls may be outstanding in parallel.
27179 */
27180
27181 -static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27182 +ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27183 loff_t *ppos)
27184 {
27185 int i;
27186 @@ -898,6 +887,8 @@ static ssize_t tty_read(struct file *fil
27187 return i;
27188 }
27189
27190 +EXPORT_SYMBOL(tty_read);
27191 +
27192 void tty_write_unlock(struct tty_struct *tty)
27193 {
27194 mutex_unlock(&tty->atomic_write_lock);
27195 @@ -1045,7 +1036,7 @@ void tty_write_message(struct tty_struct
27196 * write method will not be invoked in parallel for each device.
27197 */
27198
27199 -static ssize_t tty_write(struct file *file, const char __user *buf,
27200 +ssize_t tty_write(struct file *file, const char __user *buf,
27201 size_t count, loff_t *ppos)
27202 {
27203 struct tty_struct *tty;
27204 @@ -1072,6 +1063,8 @@ static ssize_t tty_write(struct file *fi
27205 return ret;
27206 }
27207
27208 +EXPORT_SYMBOL(tty_write);
27209 +
27210 ssize_t redirected_tty_write(struct file *file, const char __user *buf,
27211 size_t count, loff_t *ppos)
27212 {
27213 @@ -1867,7 +1860,7 @@ static int tty_open(struct inode *inode,
27214 * Takes bkl. See tty_release_dev
27215 */
27216
27217 -static int tty_release(struct inode *inode, struct file *filp)
27218 +int tty_release(struct inode *inode, struct file *filp)
27219 {
27220 lock_kernel();
27221 tty_release_dev(filp);
27222 @@ -1875,6 +1868,8 @@ static int tty_release(struct inode *ino
27223 return 0;
27224 }
27225
27226 +EXPORT_SYMBOL(tty_release);
27227 +
27228 /**
27229 * tty_poll - check tty status
27230 * @filp: file being polled
27231 @@ -1887,7 +1882,7 @@ static int tty_release(struct inode *ino
27232 * may be re-entered freely by other callers.
27233 */
27234
27235 -static unsigned int tty_poll(struct file *filp, poll_table *wait)
27236 +unsigned int tty_poll(struct file *filp, poll_table *wait)
27237 {
27238 struct tty_struct *tty;
27239 struct tty_ldisc *ld;
27240 @@ -1904,7 +1899,9 @@ static unsigned int tty_poll(struct file
27241 return ret;
27242 }
27243
27244 -static int tty_fasync(int fd, struct file *filp, int on)
27245 +EXPORT_SYMBOL(tty_poll);
27246 +
27247 +int tty_fasync(int fd, struct file *filp, int on)
27248 {
27249 struct tty_struct *tty;
27250 unsigned long flags;
27251 @@ -1948,6 +1945,8 @@ out:
27252 return retval;
27253 }
27254
27255 +EXPORT_SYMBOL(tty_fasync);
27256 +
27257 /**
27258 * tiocsti - fake input character
27259 * @tty: tty to fake input into
27260 @@ -2582,8 +2581,10 @@ long tty_ioctl(struct file *file, unsign
27261 return retval;
27262 }
27263
27264 +EXPORT_SYMBOL(tty_ioctl);
27265 +
27266 #ifdef CONFIG_COMPAT
27267 -static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27268 +long tty_compat_ioctl(struct file *file, unsigned int cmd,
27269 unsigned long arg)
27270 {
27271 struct inode *inode = file->f_dentry->d_inode;
27272 @@ -2607,6 +2608,8 @@ static long tty_compat_ioctl(struct file
27273
27274 return retval;
27275 }
27276 +
27277 +EXPORT_SYMBOL(tty_compat_ioctl);
27278 #endif
27279
27280 /*
27281 @@ -3050,11 +3053,6 @@ struct tty_struct *get_current_tty(void)
27282 }
27283 EXPORT_SYMBOL_GPL(get_current_tty);
27284
27285 -void tty_default_fops(struct file_operations *fops)
27286 -{
27287 - *fops = tty_fops;
27288 -}
27289 -
27290 /*
27291 * Initialize the console device. This is called *early*, so
27292 * we can't necessarily depend on lots of kernel help here.
27293 diff -urNp linux-2.6.32.42/drivers/char/tty_ldisc.c linux-2.6.32.42/drivers/char/tty_ldisc.c
27294 --- linux-2.6.32.42/drivers/char/tty_ldisc.c 2011-03-27 14:31:47.000000000 -0400
27295 +++ linux-2.6.32.42/drivers/char/tty_ldisc.c 2011-04-17 15:56:46.000000000 -0400
27296 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
27297 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
27298 struct tty_ldisc_ops *ldo = ld->ops;
27299
27300 - ldo->refcount--;
27301 + atomic_dec(&ldo->refcount);
27302 module_put(ldo->owner);
27303 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27304
27305 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
27306 spin_lock_irqsave(&tty_ldisc_lock, flags);
27307 tty_ldiscs[disc] = new_ldisc;
27308 new_ldisc->num = disc;
27309 - new_ldisc->refcount = 0;
27310 + atomic_set(&new_ldisc->refcount, 0);
27311 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27312
27313 return ret;
27314 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
27315 return -EINVAL;
27316
27317 spin_lock_irqsave(&tty_ldisc_lock, flags);
27318 - if (tty_ldiscs[disc]->refcount)
27319 + if (atomic_read(&tty_ldiscs[disc]->refcount))
27320 ret = -EBUSY;
27321 else
27322 tty_ldiscs[disc] = NULL;
27323 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
27324 if (ldops) {
27325 ret = ERR_PTR(-EAGAIN);
27326 if (try_module_get(ldops->owner)) {
27327 - ldops->refcount++;
27328 + atomic_inc(&ldops->refcount);
27329 ret = ldops;
27330 }
27331 }
27332 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
27333 unsigned long flags;
27334
27335 spin_lock_irqsave(&tty_ldisc_lock, flags);
27336 - ldops->refcount--;
27337 + atomic_dec(&ldops->refcount);
27338 module_put(ldops->owner);
27339 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
27340 }
27341 diff -urNp linux-2.6.32.42/drivers/char/virtio_console.c linux-2.6.32.42/drivers/char/virtio_console.c
27342 --- linux-2.6.32.42/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
27343 +++ linux-2.6.32.42/drivers/char/virtio_console.c 2011-04-17 15:56:46.000000000 -0400
27344 @@ -44,6 +44,7 @@ static unsigned int in_len;
27345 static char *in, *inbuf;
27346
27347 /* The operations for our console. */
27348 +/* cannot be const */
27349 static struct hv_ops virtio_cons;
27350
27351 /* The hvc device */
27352 diff -urNp linux-2.6.32.42/drivers/char/vt.c linux-2.6.32.42/drivers/char/vt.c
27353 --- linux-2.6.32.42/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
27354 +++ linux-2.6.32.42/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
27355 @@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
27356
27357 static void notify_write(struct vc_data *vc, unsigned int unicode)
27358 {
27359 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
27360 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
27361 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
27362 }
27363
27364 diff -urNp linux-2.6.32.42/drivers/char/vt_ioctl.c linux-2.6.32.42/drivers/char/vt_ioctl.c
27365 --- linux-2.6.32.42/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
27366 +++ linux-2.6.32.42/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
27367 @@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
27368 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
27369 return -EFAULT;
27370
27371 - if (!capable(CAP_SYS_TTY_CONFIG))
27372 - perm = 0;
27373 -
27374 switch (cmd) {
27375 case KDGKBENT:
27376 key_map = key_maps[s];
27377 @@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
27378 val = (i ? K_HOLE : K_NOSUCHMAP);
27379 return put_user(val, &user_kbe->kb_value);
27380 case KDSKBENT:
27381 + if (!capable(CAP_SYS_TTY_CONFIG))
27382 + perm = 0;
27383 +
27384 if (!perm)
27385 return -EPERM;
27386 +
27387 if (!i && v == K_NOSUCHMAP) {
27388 /* deallocate map */
27389 key_map = key_maps[s];
27390 @@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
27391 int i, j, k;
27392 int ret;
27393
27394 - if (!capable(CAP_SYS_TTY_CONFIG))
27395 - perm = 0;
27396 -
27397 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
27398 if (!kbs) {
27399 ret = -ENOMEM;
27400 @@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
27401 kfree(kbs);
27402 return ((p && *p) ? -EOVERFLOW : 0);
27403 case KDSKBSENT:
27404 + if (!capable(CAP_SYS_TTY_CONFIG))
27405 + perm = 0;
27406 +
27407 if (!perm) {
27408 ret = -EPERM;
27409 goto reterr;
27410 diff -urNp linux-2.6.32.42/drivers/connector/Kconfig linux-2.6.32.42/drivers/connector/Kconfig
27411 --- linux-2.6.32.42/drivers/connector/Kconfig 2011-03-27 14:31:47.000000000 -0400
27412 +++ linux-2.6.32.42/drivers/connector/Kconfig 2011-06-20 17:54:56.000000000 -0400
27413 @@ -1,7 +1,7 @@
27414
27415 menuconfig CONNECTOR
27416 tristate "Connector - unified userspace <-> kernelspace linker"
27417 - depends on NET
27418 + depends on NET && !GRKERNSEC
27419 ---help---
27420 This is unified userspace <-> kernelspace connector working on top
27421 of the netlink socket protocol.
27422 @@ -13,7 +13,7 @@ if CONNECTOR
27423
27424 config PROC_EVENTS
27425 boolean "Report process events to userspace"
27426 - depends on CONNECTOR=y
27427 + depends on CONNECTOR=y && !GRKERNSEC
27428 default y
27429 ---help---
27430 Provide a connector that reports process events to userspace. Send
27431 diff -urNp linux-2.6.32.42/drivers/cpufreq/cpufreq.c linux-2.6.32.42/drivers/cpufreq/cpufreq.c
27432 --- linux-2.6.32.42/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
27433 +++ linux-2.6.32.42/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
27434 @@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
27435 complete(&policy->kobj_unregister);
27436 }
27437
27438 -static struct sysfs_ops sysfs_ops = {
27439 +static const struct sysfs_ops sysfs_ops = {
27440 .show = show,
27441 .store = store,
27442 };
27443 diff -urNp linux-2.6.32.42/drivers/cpuidle/sysfs.c linux-2.6.32.42/drivers/cpuidle/sysfs.c
27444 --- linux-2.6.32.42/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
27445 +++ linux-2.6.32.42/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
27446 @@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
27447 return ret;
27448 }
27449
27450 -static struct sysfs_ops cpuidle_sysfs_ops = {
27451 +static const struct sysfs_ops cpuidle_sysfs_ops = {
27452 .show = cpuidle_show,
27453 .store = cpuidle_store,
27454 };
27455 @@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
27456 return ret;
27457 }
27458
27459 -static struct sysfs_ops cpuidle_state_sysfs_ops = {
27460 +static const struct sysfs_ops cpuidle_state_sysfs_ops = {
27461 .show = cpuidle_state_show,
27462 };
27463
27464 @@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
27465 .release = cpuidle_state_sysfs_release,
27466 };
27467
27468 -static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
27469 +static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
27470 {
27471 kobject_put(&device->kobjs[i]->kobj);
27472 wait_for_completion(&device->kobjs[i]->kobj_unregister);
27473 diff -urNp linux-2.6.32.42/drivers/crypto/hifn_795x.c linux-2.6.32.42/drivers/crypto/hifn_795x.c
27474 --- linux-2.6.32.42/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
27475 +++ linux-2.6.32.42/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
27476 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
27477 0xCA, 0x34, 0x2B, 0x2E};
27478 struct scatterlist sg;
27479
27480 + pax_track_stack();
27481 +
27482 memset(src, 0, sizeof(src));
27483 memset(ctx.key, 0, sizeof(ctx.key));
27484
27485 diff -urNp linux-2.6.32.42/drivers/crypto/padlock-aes.c linux-2.6.32.42/drivers/crypto/padlock-aes.c
27486 --- linux-2.6.32.42/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
27487 +++ linux-2.6.32.42/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
27488 @@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
27489 struct crypto_aes_ctx gen_aes;
27490 int cpu;
27491
27492 + pax_track_stack();
27493 +
27494 if (key_len % 8) {
27495 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
27496 return -EINVAL;
27497 diff -urNp linux-2.6.32.42/drivers/dma/ioat/dma.c linux-2.6.32.42/drivers/dma/ioat/dma.c
27498 --- linux-2.6.32.42/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
27499 +++ linux-2.6.32.42/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
27500 @@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
27501 return entry->show(&chan->common, page);
27502 }
27503
27504 -struct sysfs_ops ioat_sysfs_ops = {
27505 +const struct sysfs_ops ioat_sysfs_ops = {
27506 .show = ioat_attr_show,
27507 };
27508
27509 diff -urNp linux-2.6.32.42/drivers/dma/ioat/dma.h linux-2.6.32.42/drivers/dma/ioat/dma.h
27510 --- linux-2.6.32.42/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
27511 +++ linux-2.6.32.42/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
27512 @@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
27513 unsigned long *phys_complete);
27514 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
27515 void ioat_kobject_del(struct ioatdma_device *device);
27516 -extern struct sysfs_ops ioat_sysfs_ops;
27517 +extern const struct sysfs_ops ioat_sysfs_ops;
27518 extern struct ioat_sysfs_entry ioat_version_attr;
27519 extern struct ioat_sysfs_entry ioat_cap_attr;
27520 #endif /* IOATDMA_H */
27521 diff -urNp linux-2.6.32.42/drivers/edac/edac_device_sysfs.c linux-2.6.32.42/drivers/edac/edac_device_sysfs.c
27522 --- linux-2.6.32.42/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27523 +++ linux-2.6.32.42/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
27524 @@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
27525 }
27526
27527 /* edac_dev file operations for an 'ctl_info' */
27528 -static struct sysfs_ops device_ctl_info_ops = {
27529 +static const struct sysfs_ops device_ctl_info_ops = {
27530 .show = edac_dev_ctl_info_show,
27531 .store = edac_dev_ctl_info_store
27532 };
27533 @@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
27534 }
27535
27536 /* edac_dev file operations for an 'instance' */
27537 -static struct sysfs_ops device_instance_ops = {
27538 +static const struct sysfs_ops device_instance_ops = {
27539 .show = edac_dev_instance_show,
27540 .store = edac_dev_instance_store
27541 };
27542 @@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
27543 }
27544
27545 /* edac_dev file operations for a 'block' */
27546 -static struct sysfs_ops device_block_ops = {
27547 +static const struct sysfs_ops device_block_ops = {
27548 .show = edac_dev_block_show,
27549 .store = edac_dev_block_store
27550 };
27551 diff -urNp linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c
27552 --- linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27553 +++ linux-2.6.32.42/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
27554 @@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
27555 return -EIO;
27556 }
27557
27558 -static struct sysfs_ops csrowfs_ops = {
27559 +static const struct sysfs_ops csrowfs_ops = {
27560 .show = csrowdev_show,
27561 .store = csrowdev_store
27562 };
27563 @@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
27564 }
27565
27566 /* Intermediate show/store table */
27567 -static struct sysfs_ops mci_ops = {
27568 +static const struct sysfs_ops mci_ops = {
27569 .show = mcidev_show,
27570 .store = mcidev_store
27571 };
27572 diff -urNp linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c
27573 --- linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
27574 +++ linux-2.6.32.42/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
27575 @@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
27576 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
27577 static int edac_pci_poll_msec = 1000; /* one second workq period */
27578
27579 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
27580 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
27581 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
27582 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
27583
27584 static struct kobject *edac_pci_top_main_kobj;
27585 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
27586 @@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
27587 }
27588
27589 /* fs_ops table */
27590 -static struct sysfs_ops pci_instance_ops = {
27591 +static const struct sysfs_ops pci_instance_ops = {
27592 .show = edac_pci_instance_show,
27593 .store = edac_pci_instance_store
27594 };
27595 @@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
27596 return -EIO;
27597 }
27598
27599 -static struct sysfs_ops edac_pci_sysfs_ops = {
27600 +static const struct sysfs_ops edac_pci_sysfs_ops = {
27601 .show = edac_pci_dev_show,
27602 .store = edac_pci_dev_store
27603 };
27604 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
27605 edac_printk(KERN_CRIT, EDAC_PCI,
27606 "Signaled System Error on %s\n",
27607 pci_name(dev));
27608 - atomic_inc(&pci_nonparity_count);
27609 + atomic_inc_unchecked(&pci_nonparity_count);
27610 }
27611
27612 if (status & (PCI_STATUS_PARITY)) {
27613 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
27614 "Master Data Parity Error on %s\n",
27615 pci_name(dev));
27616
27617 - atomic_inc(&pci_parity_count);
27618 + atomic_inc_unchecked(&pci_parity_count);
27619 }
27620
27621 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27622 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
27623 "Detected Parity Error on %s\n",
27624 pci_name(dev));
27625
27626 - atomic_inc(&pci_parity_count);
27627 + atomic_inc_unchecked(&pci_parity_count);
27628 }
27629 }
27630
27631 @@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
27632 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
27633 "Signaled System Error on %s\n",
27634 pci_name(dev));
27635 - atomic_inc(&pci_nonparity_count);
27636 + atomic_inc_unchecked(&pci_nonparity_count);
27637 }
27638
27639 if (status & (PCI_STATUS_PARITY)) {
27640 @@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
27641 "Master Data Parity Error on "
27642 "%s\n", pci_name(dev));
27643
27644 - atomic_inc(&pci_parity_count);
27645 + atomic_inc_unchecked(&pci_parity_count);
27646 }
27647
27648 if (status & (PCI_STATUS_DETECTED_PARITY)) {
27649 @@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
27650 "Detected Parity Error on %s\n",
27651 pci_name(dev));
27652
27653 - atomic_inc(&pci_parity_count);
27654 + atomic_inc_unchecked(&pci_parity_count);
27655 }
27656 }
27657 }
27658 @@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
27659 if (!check_pci_errors)
27660 return;
27661
27662 - before_count = atomic_read(&pci_parity_count);
27663 + before_count = atomic_read_unchecked(&pci_parity_count);
27664
27665 /* scan all PCI devices looking for a Parity Error on devices and
27666 * bridges.
27667 @@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
27668 /* Only if operator has selected panic on PCI Error */
27669 if (edac_pci_get_panic_on_pe()) {
27670 /* If the count is different 'after' from 'before' */
27671 - if (before_count != atomic_read(&pci_parity_count))
27672 + if (before_count != atomic_read_unchecked(&pci_parity_count))
27673 panic("EDAC: PCI Parity Error");
27674 }
27675 }
27676 diff -urNp linux-2.6.32.42/drivers/firewire/core-cdev.c linux-2.6.32.42/drivers/firewire/core-cdev.c
27677 --- linux-2.6.32.42/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
27678 +++ linux-2.6.32.42/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
27679 @@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
27680 int ret;
27681
27682 if ((request->channels == 0 && request->bandwidth == 0) ||
27683 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
27684 - request->bandwidth < 0)
27685 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
27686 return -EINVAL;
27687
27688 r = kmalloc(sizeof(*r), GFP_KERNEL);
27689 diff -urNp linux-2.6.32.42/drivers/firewire/core-transaction.c linux-2.6.32.42/drivers/firewire/core-transaction.c
27690 --- linux-2.6.32.42/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
27691 +++ linux-2.6.32.42/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
27692 @@ -36,6 +36,7 @@
27693 #include <linux/string.h>
27694 #include <linux/timer.h>
27695 #include <linux/types.h>
27696 +#include <linux/sched.h>
27697
27698 #include <asm/byteorder.h>
27699
27700 @@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
27701 struct transaction_callback_data d;
27702 struct fw_transaction t;
27703
27704 + pax_track_stack();
27705 +
27706 init_completion(&d.done);
27707 d.payload = payload;
27708 fw_send_request(card, &t, tcode, destination_id, generation, speed,
27709 diff -urNp linux-2.6.32.42/drivers/firmware/dmi_scan.c linux-2.6.32.42/drivers/firmware/dmi_scan.c
27710 --- linux-2.6.32.42/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
27711 +++ linux-2.6.32.42/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
27712 @@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
27713 }
27714 }
27715 else {
27716 - /*
27717 - * no iounmap() for that ioremap(); it would be a no-op, but
27718 - * it's so early in setup that sucker gets confused into doing
27719 - * what it shouldn't if we actually call it.
27720 - */
27721 p = dmi_ioremap(0xF0000, 0x10000);
27722 if (p == NULL)
27723 goto error;
27724 diff -urNp linux-2.6.32.42/drivers/firmware/edd.c linux-2.6.32.42/drivers/firmware/edd.c
27725 --- linux-2.6.32.42/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
27726 +++ linux-2.6.32.42/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
27727 @@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
27728 return ret;
27729 }
27730
27731 -static struct sysfs_ops edd_attr_ops = {
27732 +static const struct sysfs_ops edd_attr_ops = {
27733 .show = edd_attr_show,
27734 };
27735
27736 diff -urNp linux-2.6.32.42/drivers/firmware/efivars.c linux-2.6.32.42/drivers/firmware/efivars.c
27737 --- linux-2.6.32.42/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
27738 +++ linux-2.6.32.42/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
27739 @@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
27740 return ret;
27741 }
27742
27743 -static struct sysfs_ops efivar_attr_ops = {
27744 +static const struct sysfs_ops efivar_attr_ops = {
27745 .show = efivar_attr_show,
27746 .store = efivar_attr_store,
27747 };
27748 diff -urNp linux-2.6.32.42/drivers/firmware/iscsi_ibft.c linux-2.6.32.42/drivers/firmware/iscsi_ibft.c
27749 --- linux-2.6.32.42/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
27750 +++ linux-2.6.32.42/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
27751 @@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
27752 return ret;
27753 }
27754
27755 -static struct sysfs_ops ibft_attr_ops = {
27756 +static const struct sysfs_ops ibft_attr_ops = {
27757 .show = ibft_show_attribute,
27758 };
27759
27760 diff -urNp linux-2.6.32.42/drivers/firmware/memmap.c linux-2.6.32.42/drivers/firmware/memmap.c
27761 --- linux-2.6.32.42/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
27762 +++ linux-2.6.32.42/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
27763 @@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
27764 NULL
27765 };
27766
27767 -static struct sysfs_ops memmap_attr_ops = {
27768 +static const struct sysfs_ops memmap_attr_ops = {
27769 .show = memmap_attr_show,
27770 };
27771
27772 diff -urNp linux-2.6.32.42/drivers/gpio/vr41xx_giu.c linux-2.6.32.42/drivers/gpio/vr41xx_giu.c
27773 --- linux-2.6.32.42/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
27774 +++ linux-2.6.32.42/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
27775 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
27776 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
27777 maskl, pendl, maskh, pendh);
27778
27779 - atomic_inc(&irq_err_count);
27780 + atomic_inc_unchecked(&irq_err_count);
27781
27782 return -EINVAL;
27783 }
27784 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c
27785 --- linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
27786 +++ linux-2.6.32.42/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
27787 @@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
27788 struct drm_crtc *tmp;
27789 int crtc_mask = 1;
27790
27791 - WARN(!crtc, "checking null crtc?");
27792 + BUG_ON(!crtc);
27793
27794 dev = crtc->dev;
27795
27796 @@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
27797
27798 adjusted_mode = drm_mode_duplicate(dev, mode);
27799
27800 + pax_track_stack();
27801 +
27802 crtc->enabled = drm_helper_crtc_in_use(crtc);
27803
27804 if (!crtc->enabled)
27805 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_drv.c linux-2.6.32.42/drivers/gpu/drm/drm_drv.c
27806 --- linux-2.6.32.42/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
27807 +++ linux-2.6.32.42/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
27808 @@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
27809 char *kdata = NULL;
27810
27811 atomic_inc(&dev->ioctl_count);
27812 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
27813 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
27814 ++file_priv->ioctl_count;
27815
27816 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
27817 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_fops.c linux-2.6.32.42/drivers/gpu/drm/drm_fops.c
27818 --- linux-2.6.32.42/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
27819 +++ linux-2.6.32.42/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
27820 @@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
27821 }
27822
27823 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
27824 - atomic_set(&dev->counts[i], 0);
27825 + atomic_set_unchecked(&dev->counts[i], 0);
27826
27827 dev->sigdata.lock = NULL;
27828
27829 @@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
27830
27831 retcode = drm_open_helper(inode, filp, dev);
27832 if (!retcode) {
27833 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
27834 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
27835 spin_lock(&dev->count_lock);
27836 - if (!dev->open_count++) {
27837 + if (local_inc_return(&dev->open_count) == 1) {
27838 spin_unlock(&dev->count_lock);
27839 retcode = drm_setup(dev);
27840 goto out;
27841 @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
27842
27843 lock_kernel();
27844
27845 - DRM_DEBUG("open_count = %d\n", dev->open_count);
27846 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
27847
27848 if (dev->driver->preclose)
27849 dev->driver->preclose(dev, file_priv);
27850 @@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
27851 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
27852 task_pid_nr(current),
27853 (long)old_encode_dev(file_priv->minor->device),
27854 - dev->open_count);
27855 + local_read(&dev->open_count));
27856
27857 /* if the master has gone away we can't do anything with the lock */
27858 if (file_priv->minor->master)
27859 @@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
27860 * End inline drm_release
27861 */
27862
27863 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
27864 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
27865 spin_lock(&dev->count_lock);
27866 - if (!--dev->open_count) {
27867 + if (local_dec_and_test(&dev->open_count)) {
27868 if (atomic_read(&dev->ioctl_count)) {
27869 DRM_ERROR("Device busy: %d\n",
27870 atomic_read(&dev->ioctl_count));
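In the drm_fops.c hunks, dev->open_count stops being a bare int manipulated under the spinlock and becomes a local_t driven by local_inc_return(), local_dec_and_test() and local_read(), so the open/close bookkeeping uses dedicated counter primitives instead of plain integer arithmetic. A minimal sketch of the same open/close pattern, using a hypothetical device structure:

    #include <asm/local.h>

    struct example_dev {
            local_t open_count;
    };

    static void example_dev_init(struct example_dev *dev)
    {
            local_set(&dev->open_count, 0);
    }

    static int example_open(struct example_dev *dev)
    {
            /* First opener sees 1 and performs the one-time setup. */
            return local_inc_return(&dev->open_count) == 1;
    }

    static int example_release(struct example_dev *dev)
    {
            /* Returns true for the last closer. */
            return local_dec_and_test(&dev->open_count);
    }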
27871 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_gem.c linux-2.6.32.42/drivers/gpu/drm/drm_gem.c
27872 --- linux-2.6.32.42/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
27873 +++ linux-2.6.32.42/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
27874 @@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
27875 spin_lock_init(&dev->object_name_lock);
27876 idr_init(&dev->object_name_idr);
27877 atomic_set(&dev->object_count, 0);
27878 - atomic_set(&dev->object_memory, 0);
27879 + atomic_set_unchecked(&dev->object_memory, 0);
27880 atomic_set(&dev->pin_count, 0);
27881 - atomic_set(&dev->pin_memory, 0);
27882 + atomic_set_unchecked(&dev->pin_memory, 0);
27883 atomic_set(&dev->gtt_count, 0);
27884 - atomic_set(&dev->gtt_memory, 0);
27885 + atomic_set_unchecked(&dev->gtt_memory, 0);
27886
27887 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
27888 if (!mm) {
27889 @@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
27890 goto fput;
27891 }
27892 atomic_inc(&dev->object_count);
27893 - atomic_add(obj->size, &dev->object_memory);
27894 + atomic_add_unchecked(obj->size, &dev->object_memory);
27895 return obj;
27896 fput:
27897 fput(obj->filp);
27898 @@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
27899
27900 fput(obj->filp);
27901 atomic_dec(&dev->object_count);
27902 - atomic_sub(obj->size, &dev->object_memory);
27903 + atomic_sub_unchecked(obj->size, &dev->object_memory);
27904 kfree(obj);
27905 }
27906 EXPORT_SYMBOL(drm_gem_object_free);
27907 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_info.c linux-2.6.32.42/drivers/gpu/drm/drm_info.c
27908 --- linux-2.6.32.42/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
27909 +++ linux-2.6.32.42/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
27910 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
27911 struct drm_local_map *map;
27912 struct drm_map_list *r_list;
27913
27914 - /* Hardcoded from _DRM_FRAME_BUFFER,
27915 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
27916 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
27917 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
27918 + static const char * const types[] = {
27919 + [_DRM_FRAME_BUFFER] = "FB",
27920 + [_DRM_REGISTERS] = "REG",
27921 + [_DRM_SHM] = "SHM",
27922 + [_DRM_AGP] = "AGP",
27923 + [_DRM_SCATTER_GATHER] = "SG",
27924 + [_DRM_CONSISTENT] = "PCI",
27925 + [_DRM_GEM] = "GEM" };
27926 const char *type;
27927 int i;
27928
27929 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
27930 map = r_list->map;
27931 if (!map)
27932 continue;
27933 - if (map->type < 0 || map->type > 5)
27934 + if (map->type >= ARRAY_SIZE(types))
27935 type = "??";
27936 else
27937 type = types[map->type];
27938 @@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
27939 struct drm_device *dev = node->minor->dev;
27940
27941 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
27942 - seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
27943 + seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
27944 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
27945 - seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
27946 - seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
27947 + seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
27948 + seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
27949 seq_printf(m, "%d gtt total\n", dev->gtt_total);
27950 return 0;
27951 }
27952 @@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
27953 mutex_lock(&dev->struct_mutex);
27954 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
27955 atomic_read(&dev->vma_count),
27956 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27957 + NULL, 0);
27958 +#else
27959 high_memory, (u64)virt_to_phys(high_memory));
27960 +#endif
27961
27962 list_for_each_entry(pt, &dev->vmalist, head) {
27963 vma = pt->vma;
27964 @@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
27965 continue;
27966 seq_printf(m,
27967 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
27968 - pt->pid, vma->vm_start, vma->vm_end,
27969 + pt->pid,
27970 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27971 + 0, 0,
27972 +#else
27973 + vma->vm_start, vma->vm_end,
27974 +#endif
27975 vma->vm_flags & VM_READ ? 'r' : '-',
27976 vma->vm_flags & VM_WRITE ? 'w' : '-',
27977 vma->vm_flags & VM_EXEC ? 'x' : '-',
27978 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
27979 vma->vm_flags & VM_LOCKED ? 'l' : '-',
27980 vma->vm_flags & VM_IO ? 'i' : '-',
27981 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27982 + 0);
27983 +#else
27984 vma->vm_pgoff);
27985 +#endif
27986
27987 #if defined(__i386__)
27988 pgprot = pgprot_val(vma->vm_page_prot);
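The drm_vm_info() hunk replaces a positional string table and a hard-coded "type > 5" bound with designated initializers indexed by the map-type constants plus an ARRAY_SIZE() check, so a new map type (here _DRM_GEM) cannot silently index past the table; the same file also blanks kernel addresses in the seq_file output when GRKERNSEC_HIDESYM is enabled. A generic sketch of the lookup pattern, with a made-up enum:

    #include <linux/kernel.h>

    enum example_map_type { EX_FB, EX_REG, EX_SHM, EX_GEM };

    static const char * const example_type_names[] = {
            [EX_FB]  = "FB",
            [EX_REG] = "REG",
            [EX_SHM] = "SHM",
            [EX_GEM] = "GEM",
    };

    static const char *example_type_name(unsigned int type)
    {
            /* Bound the index by the table itself, not by a magic constant,
             * and tolerate gaps left by the designated initializers. */
            if (type >= ARRAY_SIZE(example_type_names) ||
                !example_type_names[type])
                    return "??";
            return example_type_names[type];
    }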
27989 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c
27990 --- linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
27991 +++ linux-2.6.32.42/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
27992 @@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
27993 stats->data[i].value =
27994 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
27995 else
27996 - stats->data[i].value = atomic_read(&dev->counts[i]);
27997 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
27998 stats->data[i].type = dev->types[i];
27999 }
28000
28001 diff -urNp linux-2.6.32.42/drivers/gpu/drm/drm_lock.c linux-2.6.32.42/drivers/gpu/drm/drm_lock.c
28002 --- linux-2.6.32.42/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
28003 +++ linux-2.6.32.42/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
28004 @@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
28005 if (drm_lock_take(&master->lock, lock->context)) {
28006 master->lock.file_priv = file_priv;
28007 master->lock.lock_time = jiffies;
28008 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28009 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28010 break; /* Got lock */
28011 }
28012
28013 @@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
28014 return -EINVAL;
28015 }
28016
28017 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28018 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28019
28020 /* kernel_context_switch isn't used by any of the x86 drm
28021 * modules but is required by the Sparc driver.
28022 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c
28023 --- linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
28024 +++ linux-2.6.32.42/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
28025 @@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
28026 dma->buflist[vertex->idx],
28027 vertex->discard, vertex->used);
28028
28029 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28030 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28031 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28032 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28033 sarea_priv->last_enqueue = dev_priv->counter - 1;
28034 sarea_priv->last_dispatch = (int)hw_status[5];
28035
28036 @@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
28037 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28038 mc->last_render);
28039
28040 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28041 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28042 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28043 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28044 sarea_priv->last_enqueue = dev_priv->counter - 1;
28045 sarea_priv->last_dispatch = (int)hw_status[5];
28046
28047 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h
28048 --- linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
28049 +++ linux-2.6.32.42/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
28050 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28051 int page_flipping;
28052
28053 wait_queue_head_t irq_queue;
28054 - atomic_t irq_received;
28055 - atomic_t irq_emitted;
28056 + atomic_unchecked_t irq_received;
28057 + atomic_unchecked_t irq_emitted;
28058
28059 int front_offset;
28060 } drm_i810_private_t;
28061 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h
28062 --- linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
28063 +++ linux-2.6.32.42/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
28064 @@ -115,8 +115,8 @@ typedef struct drm_i830_private {
28065 int page_flipping;
28066
28067 wait_queue_head_t irq_queue;
28068 - atomic_t irq_received;
28069 - atomic_t irq_emitted;
28070 + atomic_unchecked_t irq_received;
28071 + atomic_unchecked_t irq_emitted;
28072
28073 int use_mi_batchbuffer_start;
28074
28075 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c
28076 --- linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
28077 +++ linux-2.6.32.42/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
28078 @@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
28079
28080 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
28081
28082 - atomic_inc(&dev_priv->irq_received);
28083 + atomic_inc_unchecked(&dev_priv->irq_received);
28084 wake_up_interruptible(&dev_priv->irq_queue);
28085
28086 return IRQ_HANDLED;
28087 @@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
28088
28089 DRM_DEBUG("%s\n", __func__);
28090
28091 - atomic_inc(&dev_priv->irq_emitted);
28092 + atomic_inc_unchecked(&dev_priv->irq_emitted);
28093
28094 BEGIN_LP_RING(2);
28095 OUT_RING(0);
28096 OUT_RING(GFX_OP_USER_INTERRUPT);
28097 ADVANCE_LP_RING();
28098
28099 - return atomic_read(&dev_priv->irq_emitted);
28100 + return atomic_read_unchecked(&dev_priv->irq_emitted);
28101 }
28102
28103 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
28104 @@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
28105
28106 DRM_DEBUG("%s\n", __func__);
28107
28108 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
28109 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
28110 return 0;
28111
28112 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
28113 @@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
28114
28115 for (;;) {
28116 __set_current_state(TASK_INTERRUPTIBLE);
28117 - if (atomic_read(&dev_priv->irq_received) >= irq_nr)
28118 + if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
28119 break;
28120 if ((signed)(end - jiffies) <= 0) {
28121 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
28122 @@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
28123 I830_WRITE16(I830REG_HWSTAM, 0xffff);
28124 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
28125 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
28126 - atomic_set(&dev_priv->irq_received, 0);
28127 - atomic_set(&dev_priv->irq_emitted, 0);
28128 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28129 + atomic_set_unchecked(&dev_priv->irq_emitted, 0);
28130 init_waitqueue_head(&dev_priv->irq_queue);
28131 }
28132
28133 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c
28134 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
28135 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
28136 @@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
28137 }
28138 }
28139
28140 -struct intel_dvo_dev_ops ch7017_ops = {
28141 +const struct intel_dvo_dev_ops ch7017_ops = {
28142 .init = ch7017_init,
28143 .detect = ch7017_detect,
28144 .mode_valid = ch7017_mode_valid,
28145 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c
28146 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
28147 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
28148 @@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
28149 }
28150 }
28151
28152 -struct intel_dvo_dev_ops ch7xxx_ops = {
28153 +const struct intel_dvo_dev_ops ch7xxx_ops = {
28154 .init = ch7xxx_init,
28155 .detect = ch7xxx_detect,
28156 .mode_valid = ch7xxx_mode_valid,
28157 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h
28158 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
28159 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
28160 @@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
28161 *
28162 * \return singly-linked list of modes or NULL if no modes found.
28163 */
28164 - struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
28165 + struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
28166
28167 /**
28168 * Clean up driver-specific bits of the output
28169 */
28170 - void (*destroy) (struct intel_dvo_device *dvo);
28171 + void (* const destroy) (struct intel_dvo_device *dvo);
28172
28173 /**
28174 * Debugging hook to dump device registers to log file
28175 */
28176 - void (*dump_regs)(struct intel_dvo_device *dvo);
28177 + void (* const dump_regs)(struct intel_dvo_device *dvo);
28178 };
28179
28180 -extern struct intel_dvo_dev_ops sil164_ops;
28181 -extern struct intel_dvo_dev_ops ch7xxx_ops;
28182 -extern struct intel_dvo_dev_ops ivch_ops;
28183 -extern struct intel_dvo_dev_ops tfp410_ops;
28184 -extern struct intel_dvo_dev_ops ch7017_ops;
28185 +extern const struct intel_dvo_dev_ops sil164_ops;
28186 +extern const struct intel_dvo_dev_ops ch7xxx_ops;
28187 +extern const struct intel_dvo_dev_ops ivch_ops;
28188 +extern const struct intel_dvo_dev_ops tfp410_ops;
28189 +extern const struct intel_dvo_dev_ops ch7017_ops;
28190
28191 #endif /* _INTEL_DVO_H */
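This header change makes every member of intel_dvo_dev_ops a const function pointer and declares the per-chip ops tables themselves const (the matching dvo_*.c hunks adjust the definitions), so the dispatch tables can live in read-only data rather than remaining writable at run time. A generic sketch of the pattern with hypothetical ops:

    struct example_ops {
            int  (* const init)(void);
            void (* const shutdown)(void);
    };

    static int  example_init(void)     { return 0; }
    static void example_shutdown(void) { }

    /* The whole table is const, so the compiler places it in .rodata. */
    static const struct example_ops example_chip_ops = {
            .init     = example_init,
            .shutdown = example_shutdown,
    };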
28192 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c
28193 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
28194 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
28195 @@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
28196 }
28197 }
28198
28199 -struct intel_dvo_dev_ops ivch_ops= {
28200 +const struct intel_dvo_dev_ops ivch_ops= {
28201 .init = ivch_init,
28202 .dpms = ivch_dpms,
28203 .save = ivch_save,
28204 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c
28205 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
28206 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
28207 @@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
28208 }
28209 }
28210
28211 -struct intel_dvo_dev_ops sil164_ops = {
28212 +const struct intel_dvo_dev_ops sil164_ops = {
28213 .init = sil164_init,
28214 .detect = sil164_detect,
28215 .mode_valid = sil164_mode_valid,
28216 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c
28217 --- linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
28218 +++ linux-2.6.32.42/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
28219 @@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
28220 }
28221 }
28222
28223 -struct intel_dvo_dev_ops tfp410_ops = {
28224 +const struct intel_dvo_dev_ops tfp410_ops = {
28225 .init = tfp410_init,
28226 .detect = tfp410_detect,
28227 .mode_valid = tfp410_mode_valid,
28228 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c
28229 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
28230 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
28231 @@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
28232 I915_READ(GTIMR));
28233 }
28234 seq_printf(m, "Interrupts received: %d\n",
28235 - atomic_read(&dev_priv->irq_received));
28236 + atomic_read_unchecked(&dev_priv->irq_received));
28237 if (dev_priv->hw_status_page != NULL) {
28238 seq_printf(m, "Current sequence: %d\n",
28239 i915_get_gem_seqno(dev));
28240 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c
28241 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
28242 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
28243 @@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
28244 return i915_resume(dev);
28245 }
28246
28247 -static struct vm_operations_struct i915_gem_vm_ops = {
28248 +static const struct vm_operations_struct i915_gem_vm_ops = {
28249 .fault = i915_gem_fault,
28250 .open = drm_gem_vm_open,
28251 .close = drm_gem_vm_close,
28252 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h
28253 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
28254 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_drv.h 2011-05-04 17:56:28.000000000 -0400
28255 @@ -197,7 +197,7 @@ typedef struct drm_i915_private {
28256 int page_flipping;
28257
28258 wait_queue_head_t irq_queue;
28259 - atomic_t irq_received;
28260 + atomic_unchecked_t irq_received;
28261 /** Protects user_irq_refcount and irq_mask_reg */
28262 spinlock_t user_irq_lock;
28263 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
28264 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c
28265 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
28266 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
28267 @@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
28268
28269 args->aper_size = dev->gtt_total;
28270 args->aper_available_size = (args->aper_size -
28271 - atomic_read(&dev->pin_memory));
28272 + atomic_read_unchecked(&dev->pin_memory));
28273
28274 return 0;
28275 }
28276 @@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
28277 return -EINVAL;
28278 }
28279
28280 + if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28281 + drm_gem_object_unreference(obj);
28282 + return -EFAULT;
28283 + }
28284 +
28285 if (i915_gem_object_needs_bit17_swizzle(obj)) {
28286 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
28287 } else {
28288 @@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
28289 return -EINVAL;
28290 }
28291
28292 + if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28293 + drm_gem_object_unreference(obj);
28294 + return -EFAULT;
28295 + }
28296 +
28297 /* We can only do the GTT pwrite on untiled buffers, as otherwise
28298 * it would end up going through the fenced access, and we'll get
28299 * different detiling behavior between reading and writing.
28300 @@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
28301
28302 if (obj_priv->gtt_space) {
28303 atomic_dec(&dev->gtt_count);
28304 - atomic_sub(obj->size, &dev->gtt_memory);
28305 + atomic_sub_unchecked(obj->size, &dev->gtt_memory);
28306
28307 drm_mm_put_block(obj_priv->gtt_space);
28308 obj_priv->gtt_space = NULL;
28309 @@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
28310 goto search_free;
28311 }
28312 atomic_inc(&dev->gtt_count);
28313 - atomic_add(obj->size, &dev->gtt_memory);
28314 + atomic_add_unchecked(obj->size, &dev->gtt_memory);
28315
28316 /* Assert that the object is not currently in any GPU domain. As it
28317 * wasn't in the GTT, there shouldn't be any way it could have been in
28318 @@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
28319 "%d/%d gtt bytes\n",
28320 atomic_read(&dev->object_count),
28321 atomic_read(&dev->pin_count),
28322 - atomic_read(&dev->object_memory),
28323 - atomic_read(&dev->pin_memory),
28324 - atomic_read(&dev->gtt_memory),
28325 + atomic_read_unchecked(&dev->object_memory),
28326 + atomic_read_unchecked(&dev->pin_memory),
28327 + atomic_read_unchecked(&dev->gtt_memory),
28328 dev->gtt_total);
28329 }
28330 goto err;
28331 @@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
28332 */
28333 if (obj_priv->pin_count == 1) {
28334 atomic_inc(&dev->pin_count);
28335 - atomic_add(obj->size, &dev->pin_memory);
28336 + atomic_add_unchecked(obj->size, &dev->pin_memory);
28337 if (!obj_priv->active &&
28338 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
28339 !list_empty(&obj_priv->list))
28340 @@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
28341 list_move_tail(&obj_priv->list,
28342 &dev_priv->mm.inactive_list);
28343 atomic_dec(&dev->pin_count);
28344 - atomic_sub(obj->size, &dev->pin_memory);
28345 + atomic_sub_unchecked(obj->size, &dev->pin_memory);
28346 }
28347 i915_verify_inactive(dev, __FILE__, __LINE__);
28348 }
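The pread/pwrite hunks above add an access_ok() check on the user-supplied buffer before the object is handed to the copy paths, dropping the GEM object reference and returning -EFAULT when the range is not a valid user mapping. A stripped-down sketch of that guard, with hypothetical argument names (the real ioctl argument structures live in the i915 driver):

    #include <linux/types.h>
    #include <linux/uaccess.h>

    /* Validate a user buffer described by (data_ptr, size) before using it. */
    static int example_check_user_buf(u64 data_ptr, u64 size, int write)
    {
            void __user *buf = (void __user *)(uintptr_t)data_ptr;

            if (!access_ok(write ? VERIFY_WRITE : VERIFY_READ, buf, size))
                    return -EFAULT;
            return 0;
    }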
28349 diff -urNp linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c
28350 --- linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
28351 +++ linux-2.6.32.42/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
28352 @@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
28353 int irq_received;
28354 int ret = IRQ_NONE;
28355
28356 - atomic_inc(&dev_priv->irq_received);
28357 + atomic_inc_unchecked(&dev_priv->irq_received);
28358
28359 if (IS_IGDNG(dev))
28360 return igdng_irq_handler(dev);
28361 @@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
28362 {
28363 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
28364
28365 - atomic_set(&dev_priv->irq_received, 0);
28366 + atomic_set_unchecked(&dev_priv->irq_received, 0);
28367
28368 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
28369 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
28370 diff -urNp linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h
28371 --- linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
28372 +++ linux-2.6.32.42/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
28373 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
28374 u32 clear_cmd;
28375 u32 maccess;
28376
28377 - atomic_t vbl_received; /**< Number of vblanks received. */
28378 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
28379 wait_queue_head_t fence_queue;
28380 - atomic_t last_fence_retired;
28381 + atomic_unchecked_t last_fence_retired;
28382 u32 next_fence_to_post;
28383
28384 unsigned int fb_cpp;
28385 diff -urNp linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c
28386 --- linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
28387 +++ linux-2.6.32.42/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
28388 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
28389 if (crtc != 0)
28390 return 0;
28391
28392 - return atomic_read(&dev_priv->vbl_received);
28393 + return atomic_read_unchecked(&dev_priv->vbl_received);
28394 }
28395
28396
28397 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
28398 /* VBLANK interrupt */
28399 if (status & MGA_VLINEPEN) {
28400 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
28401 - atomic_inc(&dev_priv->vbl_received);
28402 + atomic_inc_unchecked(&dev_priv->vbl_received);
28403 drm_handle_vblank(dev, 0);
28404 handled = 1;
28405 }
28406 @@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
28407 MGA_WRITE(MGA_PRIMEND, prim_end);
28408 }
28409
28410 - atomic_inc(&dev_priv->last_fence_retired);
28411 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
28412 DRM_WAKEUP(&dev_priv->fence_queue);
28413 handled = 1;
28414 }
28415 @@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
28416 * using fences.
28417 */
28418 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
28419 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
28420 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
28421 - *sequence) <= (1 << 23)));
28422
28423 *sequence = cur_fence;
28424 diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c
28425 --- linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
28426 +++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
28427 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
28428
28429 /* GH: Simple idle check.
28430 */
28431 - atomic_set(&dev_priv->idle_count, 0);
28432 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28433
28434 /* We don't support anything other than bus-mastering ring mode,
28435 * but the ring can be in either AGP or PCI space for the ring
28436 diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h
28437 --- linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
28438 +++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
28439 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
28440 int is_pci;
28441 unsigned long cce_buffers_offset;
28442
28443 - atomic_t idle_count;
28444 + atomic_unchecked_t idle_count;
28445
28446 int page_flipping;
28447 int current_page;
28448 u32 crtc_offset;
28449 u32 crtc_offset_cntl;
28450
28451 - atomic_t vbl_received;
28452 + atomic_unchecked_t vbl_received;
28453
28454 u32 color_fmt;
28455 unsigned int front_offset;
28456 diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c
28457 --- linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
28458 +++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
28459 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
28460 if (crtc != 0)
28461 return 0;
28462
28463 - return atomic_read(&dev_priv->vbl_received);
28464 + return atomic_read_unchecked(&dev_priv->vbl_received);
28465 }
28466
28467 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
28468 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
28469 /* VBLANK interrupt */
28470 if (status & R128_CRTC_VBLANK_INT) {
28471 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
28472 - atomic_inc(&dev_priv->vbl_received);
28473 + atomic_inc_unchecked(&dev_priv->vbl_received);
28474 drm_handle_vblank(dev, 0);
28475 return IRQ_HANDLED;
28476 }
28477 diff -urNp linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c
28478 --- linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
28479 +++ linux-2.6.32.42/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
28480 @@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
28481
28482 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
28483 {
28484 - if (atomic_read(&dev_priv->idle_count) == 0) {
28485 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
28486 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
28487 } else {
28488 - atomic_set(&dev_priv->idle_count, 0);
28489 + atomic_set_unchecked(&dev_priv->idle_count, 0);
28490 }
28491 }
28492
28493 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c
28494 --- linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
28495 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
28496 @@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
28497 char name[512];
28498 int i;
28499
28500 + pax_track_stack();
28501 +
28502 ctx->card = card;
28503 ctx->bios = bios;
28504
28505 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c
28506 --- linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
28507 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
28508 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
28509 regex_t mask_rex;
28510 regmatch_t match[4];
28511 char buf[1024];
28512 - size_t end;
28513 + long end;
28514 int len;
28515 int done = 0;
28516 int r;
28517 unsigned o;
28518 struct offset *offset;
28519 char last_reg_s[10];
28520 - int last_reg;
28521 + unsigned long last_reg;
28522
28523 if (regcomp
28524 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
28525 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c
28526 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
28527 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
28528 @@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
28529 bool linkb;
28530 struct radeon_i2c_bus_rec ddc_bus;
28531
28532 + pax_track_stack();
28533 +
28534 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
28535
28536 if (data_offset == 0)
28537 @@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
28538 }
28539 }
28540
28541 -struct bios_connector {
28542 +static struct bios_connector {
28543 bool valid;
28544 uint16_t line_mux;
28545 uint16_t devices;
28546 int connector_type;
28547 struct radeon_i2c_bus_rec ddc_bus;
28548 -};
28549 +} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
28550
28551 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
28552 drm_device
28553 @@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
28554 uint8_t dac;
28555 union atom_supported_devices *supported_devices;
28556 int i, j;
28557 - struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
28558
28559 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
28560
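The radeon_atombios.c hunks shrink two large stack frames: one function gains a pax_track_stack() call, and the other stops keeping an ATOM_MAX_SUPPORTED_DEVICE-sized array of struct bios_connector on the stack, moving it to a file-scope static instead. This trades a little bss for a much smaller kernel stack frame, which is presumably acceptable here because the table is only filled during device initialization. A generic sketch of the transformation, with made-up types and sizes:

    #define EX_MAX_ENTRIES 32

    struct ex_entry {
            int valid;
            int value;
    };

    /* Before: "struct ex_entry table[EX_MAX_ENTRIES];" as a local,
     * hundreds of bytes of stack.  After: one static instance used by
     * the init-time caller. */
    static struct ex_entry ex_table[EX_MAX_ENTRIES];

    static int ex_count_valid(void)
    {
            int i, found = 0;

            for (i = 0; i < EX_MAX_ENTRIES; i++)
                    if (ex_table[i].valid)
                            found++;
            return found;
    }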
28561 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c
28562 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
28563 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
28564 @@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
28565
28566 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
28567 error = freq - current_freq;
28568 - error = error < 0 ? 0xffffffff : error;
28569 + error = (int32_t)error < 0 ? 0xffffffff : error;
28570 } else
28571 error = abs(current_freq - freq);
28572 vco_diff = abs(vco - best_vco);
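In radeon_compute_pll(), error appears to be unsigned (the fix only makes sense if it is), so the old test "error < 0" could never be true: when freq is below current_freq the subtraction wraps to a huge positive value instead. Casting to int32_t first recovers the sign so that overshoot is scored as worst-possible. A tiny illustration of the pitfall, using kernel integer types:

    #include <linux/types.h>

    static u32 example_clamp_pll_error(u32 freq, u32 current_freq)
    {
            u32 error = freq - current_freq;

            /* With unsigned arithmetic, 100 - 200 wraps to 4294967196 and
             * "error < 0" is always false; the cast recovers the sign. */
            if ((s32)error < 0)
                    error = 0xffffffff;     /* overshoot: worst possible score */
            return error;
    }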
28573 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h
28574 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
28575 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
28576 @@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
28577
28578 /* SW interrupt */
28579 wait_queue_head_t swi_queue;
28580 - atomic_t swi_emitted;
28581 + atomic_unchecked_t swi_emitted;
28582 int vblank_crtc;
28583 uint32_t irq_enable_reg;
28584 uint32_t r500_disp_irq_reg;
28585 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c
28586 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
28587 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
28588 @@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
28589 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
28590 return 0;
28591 }
28592 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
28593 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
28594 if (!rdev->cp.ready) {
28595 /* FIXME: cp is not running assume everythings is done right
28596 * away
28597 @@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
28598 return r;
28599 }
28600 WREG32(rdev->fence_drv.scratch_reg, 0);
28601 - atomic_set(&rdev->fence_drv.seq, 0);
28602 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
28603 INIT_LIST_HEAD(&rdev->fence_drv.created);
28604 INIT_LIST_HEAD(&rdev->fence_drv.emited);
28605 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
28606 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h
28607 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
28608 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon.h 2011-05-04 17:56:28.000000000 -0400
28609 @@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
28610 */
28611 struct radeon_fence_driver {
28612 uint32_t scratch_reg;
28613 - atomic_t seq;
28614 + atomic_unchecked_t seq;
28615 uint32_t last_seq;
28616 unsigned long count_timeout;
28617 wait_queue_head_t queue;
28618 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c
28619 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
28620 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
28621 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
28622 request = compat_alloc_user_space(sizeof(*request));
28623 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
28624 || __put_user(req32.param, &request->param)
28625 - || __put_user((void __user *)(unsigned long)req32.value,
28626 + || __put_user((unsigned long)req32.value,
28627 &request->value))
28628 return -EFAULT;
28629
28630 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c
28631 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
28632 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
28633 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
28634 unsigned int ret;
28635 RING_LOCALS;
28636
28637 - atomic_inc(&dev_priv->swi_emitted);
28638 - ret = atomic_read(&dev_priv->swi_emitted);
28639 + atomic_inc_unchecked(&dev_priv->swi_emitted);
28640 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
28641
28642 BEGIN_RING(4);
28643 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
28644 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
28645 drm_radeon_private_t *dev_priv =
28646 (drm_radeon_private_t *) dev->dev_private;
28647
28648 - atomic_set(&dev_priv->swi_emitted, 0);
28649 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
28650 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
28651
28652 dev->max_vblank_count = 0x001fffff;
28653 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c
28654 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
28655 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
28656 @@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
28657 {
28658 drm_radeon_private_t *dev_priv = dev->dev_private;
28659 drm_radeon_getparam_t *param = data;
28660 - int value;
28661 + int value = 0;
28662
28663 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
28664
28665 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c
28666 --- linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
28667 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
28668 @@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
28669 DRM_INFO("radeon: ttm finalized\n");
28670 }
28671
28672 -static struct vm_operations_struct radeon_ttm_vm_ops;
28673 -static const struct vm_operations_struct *ttm_vm_ops = NULL;
28674 -
28675 -static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
28676 -{
28677 - struct ttm_buffer_object *bo;
28678 - int r;
28679 -
28680 - bo = (struct ttm_buffer_object *)vma->vm_private_data;
28681 - if (bo == NULL) {
28682 - return VM_FAULT_NOPAGE;
28683 - }
28684 - r = ttm_vm_ops->fault(vma, vmf);
28685 - return r;
28686 -}
28687 -
28688 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
28689 {
28690 struct drm_file *file_priv;
28691 struct radeon_device *rdev;
28692 - int r;
28693
28694 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
28695 return drm_mmap(filp, vma);
28696 @@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
28697
28698 file_priv = (struct drm_file *)filp->private_data;
28699 rdev = file_priv->minor->dev->dev_private;
28700 - if (rdev == NULL) {
28701 + if (!rdev)
28702 return -EINVAL;
28703 - }
28704 - r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
28705 - if (unlikely(r != 0)) {
28706 - return r;
28707 - }
28708 - if (unlikely(ttm_vm_ops == NULL)) {
28709 - ttm_vm_ops = vma->vm_ops;
28710 - radeon_ttm_vm_ops = *ttm_vm_ops;
28711 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
28712 - }
28713 - vma->vm_ops = &radeon_ttm_vm_ops;
28714 - return 0;
28715 + return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
28716 }
28717
28718
28719 diff -urNp linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c
28720 --- linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
28721 +++ linux-2.6.32.42/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
28722 @@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
28723 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
28724 rdev->pm.sideport_bandwidth.full)
28725 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
28726 - read_delay_latency.full = rfixed_const(370 * 800 * 1000);
28727 + read_delay_latency.full = rfixed_const(800 * 1000);
28728 read_delay_latency.full = rfixed_div(read_delay_latency,
28729 rdev->pm.igp_sideport_mclk);
28730 + a.full = rfixed_const(370);
28731 + read_delay_latency.full = rfixed_mul(read_delay_latency, a);
28732 } else {
28733 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
28734 rdev->pm.k8_bandwidth.full)
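The rs690 hunk splits up a fixed-point constant that overflows. Assuming radeon's usual fixed20_12 format (20 integer bits, 12 fractional bits), rfixed_const() shifts its argument left by 12, so the integer part must fit in 20 bits; 370 * 800 * 1000 = 296,000,000 is far beyond 2^20 and the shifted value no longer fits in 32 bits. Converting only 800 * 1000, dividing by the sideport clock, and then multiplying by a separate fixed-point 370 yields the same quantity (up to rounding) without the intermediate overflow. A sketch of the arithmetic, with hypothetical names:

    #include <linux/types.h>

    #define EX_FIXED_SHIFT 12                  /* fixed20_12: 20.12 format */

    static u32 ex_fixed_const(u32 a)
    {
            return a << EX_FIXED_SHIFT;        /* 'a' must fit in 20 bits */
    }

    /* 370 * 800 * 1000 = 296,000,000 > 2^20  -> ex_fixed_const() overflows u32.
     * 800 * 1000       =     800,000 < 2^20  -> safe to convert.
     * So convert 800*1000, divide by the clock, then multiply by a separate
     * fixed-point 370, as the hunk above does. */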
28735 diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c
28736 --- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
28737 +++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
28738 @@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
28739 NULL
28740 };
28741
28742 -static struct sysfs_ops ttm_bo_global_ops = {
28743 +static const struct sysfs_ops ttm_bo_global_ops = {
28744 .show = &ttm_bo_global_show
28745 };
28746
28747 diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c
28748 --- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
28749 +++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
28750 @@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
28751 {
28752 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
28753 vma->vm_private_data;
28754 - struct ttm_bo_device *bdev = bo->bdev;
28755 + struct ttm_bo_device *bdev;
28756 unsigned long bus_base;
28757 unsigned long bus_offset;
28758 unsigned long bus_size;
28759 @@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
28760 unsigned long address = (unsigned long)vmf->virtual_address;
28761 int retval = VM_FAULT_NOPAGE;
28762
28763 + if (!bo)
28764 + return VM_FAULT_NOPAGE;
28765 + bdev = bo->bdev;
28766 +
28767 /*
28768 * Work around locking order reversal in fault / nopfn
28769 * between mmap_sem and bo_reserve: Perform a trylock operation
28770 diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c
28771 --- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
28772 +++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
28773 @@ -36,7 +36,7 @@
28774 struct ttm_global_item {
28775 struct mutex mutex;
28776 void *object;
28777 - int refcount;
28778 + atomic_t refcount;
28779 };
28780
28781 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
28782 @@ -49,7 +49,7 @@ void ttm_global_init(void)
28783 struct ttm_global_item *item = &glob[i];
28784 mutex_init(&item->mutex);
28785 item->object = NULL;
28786 - item->refcount = 0;
28787 + atomic_set(&item->refcount, 0);
28788 }
28789 }
28790
28791 @@ -59,7 +59,7 @@ void ttm_global_release(void)
28792 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
28793 struct ttm_global_item *item = &glob[i];
28794 BUG_ON(item->object != NULL);
28795 - BUG_ON(item->refcount != 0);
28796 + BUG_ON(atomic_read(&item->refcount) != 0);
28797 }
28798 }
28799
28800 @@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
28801 void *object;
28802
28803 mutex_lock(&item->mutex);
28804 - if (item->refcount == 0) {
28805 + if (atomic_read(&item->refcount) == 0) {
28806 item->object = kzalloc(ref->size, GFP_KERNEL);
28807 if (unlikely(item->object == NULL)) {
28808 ret = -ENOMEM;
28809 @@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
28810 goto out_err;
28811
28812 }
28813 - ++item->refcount;
28814 + atomic_inc(&item->refcount);
28815 ref->object = item->object;
28816 object = item->object;
28817 mutex_unlock(&item->mutex);
28818 @@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
28819 struct ttm_global_item *item = &glob[ref->global_type];
28820
28821 mutex_lock(&item->mutex);
28822 - BUG_ON(item->refcount == 0);
28823 + BUG_ON(atomic_read(&item->refcount) == 0);
28824 BUG_ON(ref->object != item->object);
28825 - if (--item->refcount == 0) {
28826 + if (atomic_dec_and_test(&item->refcount)) {
28827 ref->release(ref);
28828 item->object = NULL;
28829 }
28830 diff -urNp linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c
28831 --- linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
28832 +++ linux-2.6.32.42/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
28833 @@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
28834 NULL
28835 };
28836
28837 -static struct sysfs_ops ttm_mem_zone_ops = {
28838 +static const struct sysfs_ops ttm_mem_zone_ops = {
28839 .show = &ttm_mem_zone_show,
28840 .store = &ttm_mem_zone_store
28841 };
28842 diff -urNp linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h
28843 --- linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
28844 +++ linux-2.6.32.42/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
28845 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
28846 typedef uint32_t maskarray_t[5];
28847
28848 typedef struct drm_via_irq {
28849 - atomic_t irq_received;
28850 + atomic_unchecked_t irq_received;
28851 uint32_t pending_mask;
28852 uint32_t enable_mask;
28853 wait_queue_head_t irq_queue;
28854 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
28855 struct timeval last_vblank;
28856 int last_vblank_valid;
28857 unsigned usec_per_vblank;
28858 - atomic_t vbl_received;
28859 + atomic_unchecked_t vbl_received;
28860 drm_via_state_t hc_state;
28861 char pci_buf[VIA_PCI_BUF_SIZE];
28862 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
28863 diff -urNp linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c
28864 --- linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
28865 +++ linux-2.6.32.42/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
28866 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
28867 if (crtc != 0)
28868 return 0;
28869
28870 - return atomic_read(&dev_priv->vbl_received);
28871 + return atomic_read_unchecked(&dev_priv->vbl_received);
28872 }
28873
28874 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
28875 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
28876
28877 status = VIA_READ(VIA_REG_INTERRUPT);
28878 if (status & VIA_IRQ_VBLANK_PENDING) {
28879 - atomic_inc(&dev_priv->vbl_received);
28880 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
28881 + atomic_inc_unchecked(&dev_priv->vbl_received);
28882 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
28883 do_gettimeofday(&cur_vblank);
28884 if (dev_priv->last_vblank_valid) {
28885 dev_priv->usec_per_vblank =
28886 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
28887 dev_priv->last_vblank = cur_vblank;
28888 dev_priv->last_vblank_valid = 1;
28889 }
28890 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
28891 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
28892 DRM_DEBUG("US per vblank is: %u\n",
28893 dev_priv->usec_per_vblank);
28894 }
28895 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
28896
28897 for (i = 0; i < dev_priv->num_irqs; ++i) {
28898 if (status & cur_irq->pending_mask) {
28899 - atomic_inc(&cur_irq->irq_received);
28900 + atomic_inc_unchecked(&cur_irq->irq_received);
28901 DRM_WAKEUP(&cur_irq->irq_queue);
28902 handled = 1;
28903 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
28904 @@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
28905 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28906 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
28907 masks[irq][4]));
28908 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
28909 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
28910 } else {
28911 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
28912 (((cur_irq_sequence =
28913 - atomic_read(&cur_irq->irq_received)) -
28914 + atomic_read_unchecked(&cur_irq->irq_received)) -
28915 *sequence) <= (1 << 23)));
28916 }
28917 *sequence = cur_irq_sequence;
28918 @@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
28919 }
28920
28921 for (i = 0; i < dev_priv->num_irqs; ++i) {
28922 - atomic_set(&cur_irq->irq_received, 0);
28923 + atomic_set_unchecked(&cur_irq->irq_received, 0);
28924 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
28925 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
28926 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
28927 @@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
28928 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
28929 case VIA_IRQ_RELATIVE:
28930 irqwait->request.sequence +=
28931 - atomic_read(&cur_irq->irq_received);
28932 + atomic_read_unchecked(&cur_irq->irq_received);
28933 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
28934 case VIA_IRQ_ABSOLUTE:
28935 break;
28936 diff -urNp linux-2.6.32.42/drivers/hid/hid-core.c linux-2.6.32.42/drivers/hid/hid-core.c
28937 --- linux-2.6.32.42/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
28938 +++ linux-2.6.32.42/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
28939 @@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
28940
28941 int hid_add_device(struct hid_device *hdev)
28942 {
28943 - static atomic_t id = ATOMIC_INIT(0);
28944 + static atomic_unchecked_t id = ATOMIC_INIT(0);
28945 int ret;
28946
28947 if (WARN_ON(hdev->status & HID_STAT_ADDED))
28948 @@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
28949 /* XXX hack, any other cleaner solution after the driver core
28950 * is converted to allow more than 20 bytes as the device name? */
28951 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
28952 - hdev->vendor, hdev->product, atomic_inc_return(&id));
28953 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
28954
28955 ret = device_add(&hdev->dev);
28956 if (!ret)
28957 diff -urNp linux-2.6.32.42/drivers/hid/usbhid/hiddev.c linux-2.6.32.42/drivers/hid/usbhid/hiddev.c
28958 --- linux-2.6.32.42/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
28959 +++ linux-2.6.32.42/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
28960 @@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
28961 return put_user(HID_VERSION, (int __user *)arg);
28962
28963 case HIDIOCAPPLICATION:
28964 - if (arg < 0 || arg >= hid->maxapplication)
28965 + if (arg >= hid->maxapplication)
28966 return -EINVAL;
28967
28968 for (i = 0; i < hid->maxcollection; i++)
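In the HIDIOCAPPLICATION case, arg is the unsigned long ioctl argument, so the dropped "arg < 0" comparison was dead code (always false for an unsigned type); only the upper-bound check does any work. A two-line illustration with hypothetical names:

    #include <linux/errno.h>

    static int ex_validate_index(unsigned long arg, unsigned int max)
    {
            /* For an unsigned argument, "arg < 0" can never be true, so the
             * upper-bound test is the only meaningful check. */
            return arg >= max ? -EINVAL : 0;
    }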
28969 diff -urNp linux-2.6.32.42/drivers/hwmon/lis3lv02d.c linux-2.6.32.42/drivers/hwmon/lis3lv02d.c
28970 --- linux-2.6.32.42/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
28971 +++ linux-2.6.32.42/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
28972 @@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
28973 * the lid is closed. This leads to interrupts as soon as a little move
28974 * is done.
28975 */
28976 - atomic_inc(&lis3_dev.count);
28977 + atomic_inc_unchecked(&lis3_dev.count);
28978
28979 wake_up_interruptible(&lis3_dev.misc_wait);
28980 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
28981 @@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
28982 if (test_and_set_bit(0, &lis3_dev.misc_opened))
28983 return -EBUSY; /* already open */
28984
28985 - atomic_set(&lis3_dev.count, 0);
28986 + atomic_set_unchecked(&lis3_dev.count, 0);
28987
28988 /*
28989 * The sensor can generate interrupts for free-fall and direction
28990 @@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
28991 add_wait_queue(&lis3_dev.misc_wait, &wait);
28992 while (true) {
28993 set_current_state(TASK_INTERRUPTIBLE);
28994 - data = atomic_xchg(&lis3_dev.count, 0);
28995 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
28996 if (data)
28997 break;
28998
28999 @@ -244,7 +244,7 @@ out:
29000 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
29001 {
29002 poll_wait(file, &lis3_dev.misc_wait, wait);
29003 - if (atomic_read(&lis3_dev.count))
29004 + if (atomic_read_unchecked(&lis3_dev.count))
29005 return POLLIN | POLLRDNORM;
29006 return 0;
29007 }
29008 diff -urNp linux-2.6.32.42/drivers/hwmon/lis3lv02d.h linux-2.6.32.42/drivers/hwmon/lis3lv02d.h
29009 --- linux-2.6.32.42/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
29010 +++ linux-2.6.32.42/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
29011 @@ -201,7 +201,7 @@ struct lis3lv02d {
29012
29013 struct input_polled_dev *idev; /* input device */
29014 struct platform_device *pdev; /* platform device */
29015 - atomic_t count; /* interrupt count after last read */
29016 + atomic_unchecked_t count; /* interrupt count after last read */
29017 int xcalib; /* calibrated null value for x */
29018 int ycalib; /* calibrated null value for y */
29019 int zcalib; /* calibrated null value for z */
29020 diff -urNp linux-2.6.32.42/drivers/hwmon/sht15.c linux-2.6.32.42/drivers/hwmon/sht15.c
29021 --- linux-2.6.32.42/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
29022 +++ linux-2.6.32.42/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
29023 @@ -112,7 +112,7 @@ struct sht15_data {
29024 int supply_uV;
29025 int supply_uV_valid;
29026 struct work_struct update_supply_work;
29027 - atomic_t interrupt_handled;
29028 + atomic_unchecked_t interrupt_handled;
29029 };
29030
29031 /**
29032 @@ -245,13 +245,13 @@ static inline int sht15_update_single_va
29033 return ret;
29034
29035 gpio_direction_input(data->pdata->gpio_data);
29036 - atomic_set(&data->interrupt_handled, 0);
29037 + atomic_set_unchecked(&data->interrupt_handled, 0);
29038
29039 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29040 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29041 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29042 /* Only relevant if the interrupt hasn't occured. */
29043 - if (!atomic_read(&data->interrupt_handled))
29044 + if (!atomic_read_unchecked(&data->interrupt_handled))
29045 schedule_work(&data->read_work);
29046 }
29047 ret = wait_event_timeout(data->wait_queue,
29048 @@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
29049 struct sht15_data *data = d;
29050 /* First disable the interrupt */
29051 disable_irq_nosync(irq);
29052 - atomic_inc(&data->interrupt_handled);
29053 + atomic_inc_unchecked(&data->interrupt_handled);
29054 /* Then schedule a reading work struct */
29055 if (data->flag != SHT15_READING_NOTHING)
29056 schedule_work(&data->read_work);
29057 @@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
29058 here as could have gone low in meantime so verify
29059 it hasn't!
29060 */
29061 - atomic_set(&data->interrupt_handled, 0);
29062 + atomic_set_unchecked(&data->interrupt_handled, 0);
29063 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29064 /* If still not occured or another handler has been scheduled */
29065 if (gpio_get_value(data->pdata->gpio_data)
29066 - || atomic_read(&data->interrupt_handled))
29067 + || atomic_read_unchecked(&data->interrupt_handled))
29068 return;
29069 }
29070 /* Read the data back from the device */
29071 diff -urNp linux-2.6.32.42/drivers/hwmon/w83791d.c linux-2.6.32.42/drivers/hwmon/w83791d.c
29072 --- linux-2.6.32.42/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
29073 +++ linux-2.6.32.42/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
29074 @@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
29075 struct i2c_board_info *info);
29076 static int w83791d_remove(struct i2c_client *client);
29077
29078 -static int w83791d_read(struct i2c_client *client, u8 register);
29079 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
29080 +static int w83791d_read(struct i2c_client *client, u8 reg);
29081 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
29082 static struct w83791d_data *w83791d_update_device(struct device *dev);
29083
29084 #ifdef DEBUG
29085 diff -urNp linux-2.6.32.42/drivers/ide/ide-cd.c linux-2.6.32.42/drivers/ide/ide-cd.c
29086 --- linux-2.6.32.42/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
29087 +++ linux-2.6.32.42/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
29088 @@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
29089 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29090 if ((unsigned long)buf & alignment
29091 || blk_rq_bytes(rq) & q->dma_pad_mask
29092 - || object_is_on_stack(buf))
29093 + || object_starts_on_stack(buf))
29094 drive->dma = 0;
29095 }
29096 }
29097 diff -urNp linux-2.6.32.42/drivers/ide/ide-floppy.c linux-2.6.32.42/drivers/ide/ide-floppy.c
29098 --- linux-2.6.32.42/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
29099 +++ linux-2.6.32.42/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
29100 @@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
29101 u8 pc_buf[256], header_len, desc_cnt;
29102 int i, rc = 1, blocks, length;
29103
29104 + pax_track_stack();
29105 +
29106 ide_debug_log(IDE_DBG_FUNC, "enter");
29107
29108 drive->bios_cyl = 0;
29109 diff -urNp linux-2.6.32.42/drivers/ide/setup-pci.c linux-2.6.32.42/drivers/ide/setup-pci.c
29110 --- linux-2.6.32.42/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
29111 +++ linux-2.6.32.42/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
29112 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
29113 int ret, i, n_ports = dev2 ? 4 : 2;
29114 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
29115
29116 + pax_track_stack();
29117 +
29118 for (i = 0; i < n_ports / 2; i++) {
29119 ret = ide_setup_pci_controller(pdev[i], d, !i);
29120 if (ret < 0)
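
The pax_track_stack() calls inserted above, and in the ipath, sidewinder and eicon/isdn hunks further down, go into functions that place unusually large buffers on the kernel stack (pc_buf[256], four struct ide_hw entries, and so on). The helper belongs to PaX's stack tracking: it records how deep the stack has actually grown so the used region can be accounted for and cleared later. The userspace sketch below only mimics that bookkeeping; track_stack() and lowest_sp are invented names, and a downward-growing stack is assumed:

/* Illustrative only: not part of the patch. */
#include <stdint.h>
#include <stdio.h>

static uintptr_t lowest_sp = UINTPTR_MAX;

static void track_stack(void)
{
        char probe;                     /* address of a local ~ current stack pointer */

        if ((uintptr_t)&probe < lowest_sp)
                lowest_sp = (uintptr_t)&probe;
}

static void big_frame_function(void)
{
        char pc_buf[256];               /* the kind of buffer the patch flags */

        track_stack();                  /* where pax_track_stack() would sit */
        pc_buf[0] = 0;
        (void)pc_buf;
}

int main(void)
{
        char probe;

        big_frame_function();
        printf("deepest stack use seen: %zu bytes below main()\n",
               (size_t)((uintptr_t)&probe - lowest_sp));
        return 0;
}
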
29121 diff -urNp linux-2.6.32.42/drivers/ieee1394/dv1394.c linux-2.6.32.42/drivers/ieee1394/dv1394.c
29122 --- linux-2.6.32.42/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
29123 +++ linux-2.6.32.42/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
29124 @@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
29125 based upon DIF section and sequence
29126 */
29127
29128 -static void inline
29129 +static inline void
29130 frame_put_packet (struct frame *f, struct packet *p)
29131 {
29132 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
29133 diff -urNp linux-2.6.32.42/drivers/ieee1394/hosts.c linux-2.6.32.42/drivers/ieee1394/hosts.c
29134 --- linux-2.6.32.42/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
29135 +++ linux-2.6.32.42/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
29136 @@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
29137 }
29138
29139 static struct hpsb_host_driver dummy_driver = {
29140 + .name = "dummy",
29141 .transmit_packet = dummy_transmit_packet,
29142 .devctl = dummy_devctl,
29143 .isoctl = dummy_isoctl
29144 diff -urNp linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c
29145 --- linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
29146 +++ linux-2.6.32.42/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
29147 @@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
29148 for (func = 0; func < 8; func++) {
29149 u32 class = read_pci_config(num,slot,func,
29150 PCI_CLASS_REVISION);
29151 - if ((class == 0xffffffff))
29152 + if (class == 0xffffffff)
29153 continue; /* No device at this func */
29154
29155 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
29156 diff -urNp linux-2.6.32.42/drivers/ieee1394/ohci1394.c linux-2.6.32.42/drivers/ieee1394/ohci1394.c
29157 --- linux-2.6.32.42/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
29158 +++ linux-2.6.32.42/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
29159 @@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
29160 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
29161
29162 /* Module Parameters */
29163 -static int phys_dma = 1;
29164 +static int phys_dma;
29165 module_param(phys_dma, int, 0444);
29166 -MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
29167 +MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
29168
29169 static void dma_trm_tasklet(unsigned long data);
29170 static void dma_trm_reset(struct dma_trm_ctx *d);
29171 diff -urNp linux-2.6.32.42/drivers/ieee1394/sbp2.c linux-2.6.32.42/drivers/ieee1394/sbp2.c
29172 --- linux-2.6.32.42/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
29173 +++ linux-2.6.32.42/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
29174 @@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
29175 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
29176 MODULE_LICENSE("GPL");
29177
29178 -static int sbp2_module_init(void)
29179 +static int __init sbp2_module_init(void)
29180 {
29181 int ret;
29182
29183 diff -urNp linux-2.6.32.42/drivers/infiniband/core/cm.c linux-2.6.32.42/drivers/infiniband/core/cm.c
29184 --- linux-2.6.32.42/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
29185 +++ linux-2.6.32.42/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
29186 @@ -112,7 +112,7 @@ static char const counter_group_names[CM
29187
29188 struct cm_counter_group {
29189 struct kobject obj;
29190 - atomic_long_t counter[CM_ATTR_COUNT];
29191 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29192 };
29193
29194 struct cm_counter_attribute {
29195 @@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
29196 struct ib_mad_send_buf *msg = NULL;
29197 int ret;
29198
29199 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29200 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29201 counter[CM_REQ_COUNTER]);
29202
29203 /* Quick state check to discard duplicate REQs. */
29204 @@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
29205 if (!cm_id_priv)
29206 return;
29207
29208 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29209 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29210 counter[CM_REP_COUNTER]);
29211 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29212 if (ret)
29213 @@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
29214 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29215 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29216 spin_unlock_irq(&cm_id_priv->lock);
29217 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29218 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29219 counter[CM_RTU_COUNTER]);
29220 goto out;
29221 }
29222 @@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
29223 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29224 dreq_msg->local_comm_id);
29225 if (!cm_id_priv) {
29226 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29227 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29228 counter[CM_DREQ_COUNTER]);
29229 cm_issue_drep(work->port, work->mad_recv_wc);
29230 return -EINVAL;
29231 @@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
29232 case IB_CM_MRA_REP_RCVD:
29233 break;
29234 case IB_CM_TIMEWAIT:
29235 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29236 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29237 counter[CM_DREQ_COUNTER]);
29238 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29239 goto unlock;
29240 @@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
29241 cm_free_msg(msg);
29242 goto deref;
29243 case IB_CM_DREQ_RCVD:
29244 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29245 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29246 counter[CM_DREQ_COUNTER]);
29247 goto unlock;
29248 default:
29249 @@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
29250 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29251 cm_id_priv->msg, timeout)) {
29252 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29253 - atomic_long_inc(&work->port->
29254 + atomic_long_inc_unchecked(&work->port->
29255 counter_group[CM_RECV_DUPLICATES].
29256 counter[CM_MRA_COUNTER]);
29257 goto out;
29258 @@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
29259 break;
29260 case IB_CM_MRA_REQ_RCVD:
29261 case IB_CM_MRA_REP_RCVD:
29262 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29263 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29264 counter[CM_MRA_COUNTER]);
29265 /* fall through */
29266 default:
29267 @@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
29268 case IB_CM_LAP_IDLE:
29269 break;
29270 case IB_CM_MRA_LAP_SENT:
29271 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29272 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29273 counter[CM_LAP_COUNTER]);
29274 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29275 goto unlock;
29276 @@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
29277 cm_free_msg(msg);
29278 goto deref;
29279 case IB_CM_LAP_RCVD:
29280 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29281 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29282 counter[CM_LAP_COUNTER]);
29283 goto unlock;
29284 default:
29285 @@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
29286 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29287 if (cur_cm_id_priv) {
29288 spin_unlock_irq(&cm.lock);
29289 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29290 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29291 counter[CM_SIDR_REQ_COUNTER]);
29292 goto out; /* Duplicate message. */
29293 }
29294 @@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
29295 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29296 msg->retries = 1;
29297
29298 - atomic_long_add(1 + msg->retries,
29299 + atomic_long_add_unchecked(1 + msg->retries,
29300 &port->counter_group[CM_XMIT].counter[attr_index]);
29301 if (msg->retries)
29302 - atomic_long_add(msg->retries,
29303 + atomic_long_add_unchecked(msg->retries,
29304 &port->counter_group[CM_XMIT_RETRIES].
29305 counter[attr_index]);
29306
29307 @@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
29308 }
29309
29310 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29311 - atomic_long_inc(&port->counter_group[CM_RECV].
29312 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29313 counter[attr_id - CM_ATTR_ID_OFFSET]);
29314
29315 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29316 @@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
29317 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29318
29319 return sprintf(buf, "%ld\n",
29320 - atomic_long_read(&group->counter[cm_attr->index]));
29321 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
29322 }
29323
29324 -static struct sysfs_ops cm_counter_ops = {
29325 +static const struct sysfs_ops cm_counter_ops = {
29326 .show = cm_show_counter
29327 };
29328
29329 diff -urNp linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c
29330 --- linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
29331 +++ linux-2.6.32.42/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
29332 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
29333
29334 struct task_struct *thread;
29335
29336 - atomic_t req_ser;
29337 - atomic_t flush_ser;
29338 + atomic_unchecked_t req_ser;
29339 + atomic_unchecked_t flush_ser;
29340
29341 wait_queue_head_t force_wait;
29342 };
29343 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
29344 struct ib_fmr_pool *pool = pool_ptr;
29345
29346 do {
29347 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
29348 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
29349 ib_fmr_batch_release(pool);
29350
29351 - atomic_inc(&pool->flush_ser);
29352 + atomic_inc_unchecked(&pool->flush_ser);
29353 wake_up_interruptible(&pool->force_wait);
29354
29355 if (pool->flush_function)
29356 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
29357 }
29358
29359 set_current_state(TASK_INTERRUPTIBLE);
29360 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
29361 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
29362 !kthread_should_stop())
29363 schedule();
29364 __set_current_state(TASK_RUNNING);
29365 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
29366 pool->dirty_watermark = params->dirty_watermark;
29367 pool->dirty_len = 0;
29368 spin_lock_init(&pool->pool_lock);
29369 - atomic_set(&pool->req_ser, 0);
29370 - atomic_set(&pool->flush_ser, 0);
29371 + atomic_set_unchecked(&pool->req_ser, 0);
29372 + atomic_set_unchecked(&pool->flush_ser, 0);
29373 init_waitqueue_head(&pool->force_wait);
29374
29375 pool->thread = kthread_run(ib_fmr_cleanup_thread,
29376 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
29377 }
29378 spin_unlock_irq(&pool->pool_lock);
29379
29380 - serial = atomic_inc_return(&pool->req_ser);
29381 + serial = atomic_inc_return_unchecked(&pool->req_ser);
29382 wake_up_process(pool->thread);
29383
29384 if (wait_event_interruptible(pool->force_wait,
29385 - atomic_read(&pool->flush_ser) - serial >= 0))
29386 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
29387 return -EINTR;
29388
29389 return 0;
29390 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
29391 } else {
29392 list_add_tail(&fmr->list, &pool->dirty_list);
29393 if (++pool->dirty_len >= pool->dirty_watermark) {
29394 - atomic_inc(&pool->req_ser);
29395 + atomic_inc_unchecked(&pool->req_ser);
29396 wake_up_process(pool->thread);
29397 }
29398 }
29399 diff -urNp linux-2.6.32.42/drivers/infiniband/core/sysfs.c linux-2.6.32.42/drivers/infiniband/core/sysfs.c
29400 --- linux-2.6.32.42/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
29401 +++ linux-2.6.32.42/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
29402 @@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
29403 return port_attr->show(p, port_attr, buf);
29404 }
29405
29406 -static struct sysfs_ops port_sysfs_ops = {
29407 +static const struct sysfs_ops port_sysfs_ops = {
29408 .show = port_attr_show
29409 };
29410
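
The sysfs_ops constification above, like the one in the infiniband cm.c hunk earlier and the backlight_ops/platform_suspend_ops changes in the macintosh hunks further down, is part of the patch's wider effort to move function-pointer tables into read-only memory: a const ops structure cannot have its pointers retargeted at run time, and an accidental write is rejected at compile time. A small sketch of the effect, with demo types only:

/* Illustrative only: not part of the patch. */
#include <stdio.h>

struct demo_ops {
        void (*show)(void);
};

static void real_show(void)
{
        puts("show");
}

static const struct demo_ops ops = { .show = real_show };   /* lands in .rodata */

int main(void)
{
        ops.show();                     /* calling through the table still works */
        /* ops.show = real_show;           would not compile: assignment of
         *                                 member 'show' in read-only object */
        return 0;
}
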
29411 diff -urNp linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c
29412 --- linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
29413 +++ linux-2.6.32.42/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
29414 @@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
29415 dst->grh.sgid_index = src->grh.sgid_index;
29416 dst->grh.hop_limit = src->grh.hop_limit;
29417 dst->grh.traffic_class = src->grh.traffic_class;
29418 + memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
29419 dst->dlid = src->dlid;
29420 dst->sl = src->sl;
29421 dst->src_path_bits = src->src_path_bits;
29422 dst->static_rate = src->static_rate;
29423 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
29424 dst->port_num = src->port_num;
29425 + dst->reserved = 0;
29426 }
29427 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
29428
29429 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
29430 struct ib_qp_attr *src)
29431 {
29432 + dst->qp_state = src->qp_state;
29433 dst->cur_qp_state = src->cur_qp_state;
29434 dst->path_mtu = src->path_mtu;
29435 dst->path_mig_state = src->path_mig_state;
29436 @@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
29437 dst->rnr_retry = src->rnr_retry;
29438 dst->alt_port_num = src->alt_port_num;
29439 dst->alt_timeout = src->alt_timeout;
29440 + memset(dst->reserved, 0, sizeof(dst->reserved));
29441 }
29442 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
29443
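
The additions above zero the reserved fields, and copy the previously skipped qp_state, before ib_copy_ah_attr_to_user()/ib_copy_qp_attr_to_user() hand the structure to userspace; without that, whatever the kernel stack happened to contain in those bytes leaks out. The memset() added to the mISDN socket ioctls further down closes the same kind of hole. A userspace sketch of why zeroing first matters, with a made-up struct layout:

/* Illustrative only: not part of the patch. */
#include <stdio.h>
#include <string.h>

struct devinfo {
        int   id;
        char  name[8];
        short flags;                    /* padding typically follows here */
};

static void fill_unsafe(struct devinfo *di)
{
        di->id = 1;                     /* name, flags and padding left untouched */
}

static void fill_safe(struct devinfo *di)
{
        memset(di, 0, sizeof(*di));     /* the pattern the patch adds */
        di->id = 1;
}

static size_t stale_bytes(const struct devinfo *di)
{
        const unsigned char *p = (const unsigned char *)di;
        size_t i, n = 0;

        for (i = 0; i < sizeof(*di); i++)
                n += (p[i] == 0xAA);
        return n;
}

int main(void)
{
        struct devinfo di;

        memset(&di, 0xAA, sizeof(di));  /* stand-in for old stack contents */
        fill_unsafe(&di);
        printf("without memset: %zu stale byte(s) would reach userspace\n",
               stale_bytes(&di));

        memset(&di, 0xAA, sizeof(di));
        fill_safe(&di);
        printf("with memset:    %zu stale byte(s) would reach userspace\n",
               stale_bytes(&di));
        return 0;
}
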
29444 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c
29445 --- linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
29446 +++ linux-2.6.32.42/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
29447 @@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
29448 struct infinipath_counters counters;
29449 struct ipath_devdata *dd;
29450
29451 + pax_track_stack();
29452 +
29453 dd = file->f_path.dentry->d_inode->i_private;
29454 dd->ipath_f_read_counters(dd, &counters);
29455
29456 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c
29457 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
29458 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
29459 @@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
29460 LIST_HEAD(nes_adapter_list);
29461 static LIST_HEAD(nes_dev_list);
29462
29463 -atomic_t qps_destroyed;
29464 +atomic_unchecked_t qps_destroyed;
29465
29466 static unsigned int ee_flsh_adapter;
29467 static unsigned int sysfs_nonidx_addr;
29468 @@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
29469 struct nes_adapter *nesadapter = nesdev->nesadapter;
29470 u32 qp_id;
29471
29472 - atomic_inc(&qps_destroyed);
29473 + atomic_inc_unchecked(&qps_destroyed);
29474
29475 /* Free the control structures */
29476
29477 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c
29478 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
29479 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
29480 @@ -69,11 +69,11 @@ u32 cm_packets_received;
29481 u32 cm_listens_created;
29482 u32 cm_listens_destroyed;
29483 u32 cm_backlog_drops;
29484 -atomic_t cm_loopbacks;
29485 -atomic_t cm_nodes_created;
29486 -atomic_t cm_nodes_destroyed;
29487 -atomic_t cm_accel_dropped_pkts;
29488 -atomic_t cm_resets_recvd;
29489 +atomic_unchecked_t cm_loopbacks;
29490 +atomic_unchecked_t cm_nodes_created;
29491 +atomic_unchecked_t cm_nodes_destroyed;
29492 +atomic_unchecked_t cm_accel_dropped_pkts;
29493 +atomic_unchecked_t cm_resets_recvd;
29494
29495 static inline int mini_cm_accelerated(struct nes_cm_core *,
29496 struct nes_cm_node *);
29497 @@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
29498
29499 static struct nes_cm_core *g_cm_core;
29500
29501 -atomic_t cm_connects;
29502 -atomic_t cm_accepts;
29503 -atomic_t cm_disconnects;
29504 -atomic_t cm_closes;
29505 -atomic_t cm_connecteds;
29506 -atomic_t cm_connect_reqs;
29507 -atomic_t cm_rejects;
29508 +atomic_unchecked_t cm_connects;
29509 +atomic_unchecked_t cm_accepts;
29510 +atomic_unchecked_t cm_disconnects;
29511 +atomic_unchecked_t cm_closes;
29512 +atomic_unchecked_t cm_connecteds;
29513 +atomic_unchecked_t cm_connect_reqs;
29514 +atomic_unchecked_t cm_rejects;
29515
29516
29517 /**
29518 @@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
29519 cm_node->rem_mac);
29520
29521 add_hte_node(cm_core, cm_node);
29522 - atomic_inc(&cm_nodes_created);
29523 + atomic_inc_unchecked(&cm_nodes_created);
29524
29525 return cm_node;
29526 }
29527 @@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
29528 }
29529
29530 atomic_dec(&cm_core->node_cnt);
29531 - atomic_inc(&cm_nodes_destroyed);
29532 + atomic_inc_unchecked(&cm_nodes_destroyed);
29533 nesqp = cm_node->nesqp;
29534 if (nesqp) {
29535 nesqp->cm_node = NULL;
29536 @@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
29537
29538 static void drop_packet(struct sk_buff *skb)
29539 {
29540 - atomic_inc(&cm_accel_dropped_pkts);
29541 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
29542 dev_kfree_skb_any(skb);
29543 }
29544
29545 @@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
29546
29547 int reset = 0; /* whether to send reset in case of err.. */
29548 int passive_state;
29549 - atomic_inc(&cm_resets_recvd);
29550 + atomic_inc_unchecked(&cm_resets_recvd);
29551 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
29552 " refcnt=%d\n", cm_node, cm_node->state,
29553 atomic_read(&cm_node->ref_count));
29554 @@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
29555 rem_ref_cm_node(cm_node->cm_core, cm_node);
29556 return NULL;
29557 }
29558 - atomic_inc(&cm_loopbacks);
29559 + atomic_inc_unchecked(&cm_loopbacks);
29560 loopbackremotenode->loopbackpartner = cm_node;
29561 loopbackremotenode->tcp_cntxt.rcv_wscale =
29562 NES_CM_DEFAULT_RCV_WND_SCALE;
29563 @@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
29564 add_ref_cm_node(cm_node);
29565 } else if (cm_node->state == NES_CM_STATE_TSA) {
29566 rem_ref_cm_node(cm_core, cm_node);
29567 - atomic_inc(&cm_accel_dropped_pkts);
29568 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
29569 dev_kfree_skb_any(skb);
29570 break;
29571 }
29572 @@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
29573
29574 if ((cm_id) && (cm_id->event_handler)) {
29575 if (issue_disconn) {
29576 - atomic_inc(&cm_disconnects);
29577 + atomic_inc_unchecked(&cm_disconnects);
29578 cm_event.event = IW_CM_EVENT_DISCONNECT;
29579 cm_event.status = disconn_status;
29580 cm_event.local_addr = cm_id->local_addr;
29581 @@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
29582 }
29583
29584 if (issue_close) {
29585 - atomic_inc(&cm_closes);
29586 + atomic_inc_unchecked(&cm_closes);
29587 nes_disconnect(nesqp, 1);
29588
29589 cm_id->provider_data = nesqp;
29590 @@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
29591
29592 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
29593 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
29594 - atomic_inc(&cm_accepts);
29595 + atomic_inc_unchecked(&cm_accepts);
29596
29597 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
29598 atomic_read(&nesvnic->netdev->refcnt));
29599 @@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
29600
29601 struct nes_cm_core *cm_core;
29602
29603 - atomic_inc(&cm_rejects);
29604 + atomic_inc_unchecked(&cm_rejects);
29605 cm_node = (struct nes_cm_node *) cm_id->provider_data;
29606 loopback = cm_node->loopbackpartner;
29607 cm_core = cm_node->cm_core;
29608 @@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
29609 ntohl(cm_id->local_addr.sin_addr.s_addr),
29610 ntohs(cm_id->local_addr.sin_port));
29611
29612 - atomic_inc(&cm_connects);
29613 + atomic_inc_unchecked(&cm_connects);
29614 nesqp->active_conn = 1;
29615
29616 /* cache the cm_id in the qp */
29617 @@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
29618 if (nesqp->destroyed) {
29619 return;
29620 }
29621 - atomic_inc(&cm_connecteds);
29622 + atomic_inc_unchecked(&cm_connecteds);
29623 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
29624 " local port 0x%04X. jiffies = %lu.\n",
29625 nesqp->hwqp.qp_id,
29626 @@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
29627
29628 ret = cm_id->event_handler(cm_id, &cm_event);
29629 cm_id->add_ref(cm_id);
29630 - atomic_inc(&cm_closes);
29631 + atomic_inc_unchecked(&cm_closes);
29632 cm_event.event = IW_CM_EVENT_CLOSE;
29633 cm_event.status = IW_CM_EVENT_STATUS_OK;
29634 cm_event.provider_data = cm_id->provider_data;
29635 @@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
29636 return;
29637 cm_id = cm_node->cm_id;
29638
29639 - atomic_inc(&cm_connect_reqs);
29640 + atomic_inc_unchecked(&cm_connect_reqs);
29641 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
29642 cm_node, cm_id, jiffies);
29643
29644 @@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
29645 return;
29646 cm_id = cm_node->cm_id;
29647
29648 - atomic_inc(&cm_connect_reqs);
29649 + atomic_inc_unchecked(&cm_connect_reqs);
29650 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
29651 cm_node, cm_id, jiffies);
29652
29653 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h
29654 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
29655 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
29656 @@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
29657 extern unsigned int wqm_quanta;
29658 extern struct list_head nes_adapter_list;
29659
29660 -extern atomic_t cm_connects;
29661 -extern atomic_t cm_accepts;
29662 -extern atomic_t cm_disconnects;
29663 -extern atomic_t cm_closes;
29664 -extern atomic_t cm_connecteds;
29665 -extern atomic_t cm_connect_reqs;
29666 -extern atomic_t cm_rejects;
29667 -extern atomic_t mod_qp_timouts;
29668 -extern atomic_t qps_created;
29669 -extern atomic_t qps_destroyed;
29670 -extern atomic_t sw_qps_destroyed;
29671 +extern atomic_unchecked_t cm_connects;
29672 +extern atomic_unchecked_t cm_accepts;
29673 +extern atomic_unchecked_t cm_disconnects;
29674 +extern atomic_unchecked_t cm_closes;
29675 +extern atomic_unchecked_t cm_connecteds;
29676 +extern atomic_unchecked_t cm_connect_reqs;
29677 +extern atomic_unchecked_t cm_rejects;
29678 +extern atomic_unchecked_t mod_qp_timouts;
29679 +extern atomic_unchecked_t qps_created;
29680 +extern atomic_unchecked_t qps_destroyed;
29681 +extern atomic_unchecked_t sw_qps_destroyed;
29682 extern u32 mh_detected;
29683 extern u32 mh_pauses_sent;
29684 extern u32 cm_packets_sent;
29685 @@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
29686 extern u32 cm_listens_created;
29687 extern u32 cm_listens_destroyed;
29688 extern u32 cm_backlog_drops;
29689 -extern atomic_t cm_loopbacks;
29690 -extern atomic_t cm_nodes_created;
29691 -extern atomic_t cm_nodes_destroyed;
29692 -extern atomic_t cm_accel_dropped_pkts;
29693 -extern atomic_t cm_resets_recvd;
29694 +extern atomic_unchecked_t cm_loopbacks;
29695 +extern atomic_unchecked_t cm_nodes_created;
29696 +extern atomic_unchecked_t cm_nodes_destroyed;
29697 +extern atomic_unchecked_t cm_accel_dropped_pkts;
29698 +extern atomic_unchecked_t cm_resets_recvd;
29699
29700 extern u32 int_mod_timer_init;
29701 extern u32 int_mod_cq_depth_256;
29702 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c
29703 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
29704 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
29705 @@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
29706 target_stat_values[++index] = mh_detected;
29707 target_stat_values[++index] = mh_pauses_sent;
29708 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
29709 - target_stat_values[++index] = atomic_read(&cm_connects);
29710 - target_stat_values[++index] = atomic_read(&cm_accepts);
29711 - target_stat_values[++index] = atomic_read(&cm_disconnects);
29712 - target_stat_values[++index] = atomic_read(&cm_connecteds);
29713 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
29714 - target_stat_values[++index] = atomic_read(&cm_rejects);
29715 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
29716 - target_stat_values[++index] = atomic_read(&qps_created);
29717 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
29718 - target_stat_values[++index] = atomic_read(&qps_destroyed);
29719 - target_stat_values[++index] = atomic_read(&cm_closes);
29720 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
29721 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
29722 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
29723 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
29724 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
29725 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
29726 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
29727 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
29728 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
29729 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
29730 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
29731 target_stat_values[++index] = cm_packets_sent;
29732 target_stat_values[++index] = cm_packets_bounced;
29733 target_stat_values[++index] = cm_packets_created;
29734 @@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
29735 target_stat_values[++index] = cm_listens_created;
29736 target_stat_values[++index] = cm_listens_destroyed;
29737 target_stat_values[++index] = cm_backlog_drops;
29738 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
29739 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
29740 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
29741 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
29742 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
29743 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
29744 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
29745 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
29746 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
29747 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
29748 target_stat_values[++index] = int_mod_timer_init;
29749 target_stat_values[++index] = int_mod_cq_depth_1;
29750 target_stat_values[++index] = int_mod_cq_depth_4;
29751 diff -urNp linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c
29752 --- linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
29753 +++ linux-2.6.32.42/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
29754 @@ -45,9 +45,9 @@
29755
29756 #include <rdma/ib_umem.h>
29757
29758 -atomic_t mod_qp_timouts;
29759 -atomic_t qps_created;
29760 -atomic_t sw_qps_destroyed;
29761 +atomic_unchecked_t mod_qp_timouts;
29762 +atomic_unchecked_t qps_created;
29763 +atomic_unchecked_t sw_qps_destroyed;
29764
29765 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
29766
29767 @@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
29768 if (init_attr->create_flags)
29769 return ERR_PTR(-EINVAL);
29770
29771 - atomic_inc(&qps_created);
29772 + atomic_inc_unchecked(&qps_created);
29773 switch (init_attr->qp_type) {
29774 case IB_QPT_RC:
29775 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
29776 @@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
29777 struct iw_cm_event cm_event;
29778 int ret;
29779
29780 - atomic_inc(&sw_qps_destroyed);
29781 + atomic_inc_unchecked(&sw_qps_destroyed);
29782 nesqp->destroyed = 1;
29783
29784 /* Blow away the connection if it exists. */
29785 diff -urNp linux-2.6.32.42/drivers/input/gameport/gameport.c linux-2.6.32.42/drivers/input/gameport/gameport.c
29786 --- linux-2.6.32.42/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
29787 +++ linux-2.6.32.42/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
29788 @@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
29789 */
29790 static void gameport_init_port(struct gameport *gameport)
29791 {
29792 - static atomic_t gameport_no = ATOMIC_INIT(0);
29793 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
29794
29795 __module_get(THIS_MODULE);
29796
29797 mutex_init(&gameport->drv_mutex);
29798 device_initialize(&gameport->dev);
29799 - dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
29800 + dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
29801 gameport->dev.bus = &gameport_bus;
29802 gameport->dev.release = gameport_release_port;
29803 if (gameport->parent)
29804 diff -urNp linux-2.6.32.42/drivers/input/input.c linux-2.6.32.42/drivers/input/input.c
29805 --- linux-2.6.32.42/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
29806 +++ linux-2.6.32.42/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
29807 @@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
29808 */
29809 int input_register_device(struct input_dev *dev)
29810 {
29811 - static atomic_t input_no = ATOMIC_INIT(0);
29812 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
29813 struct input_handler *handler;
29814 const char *path;
29815 int error;
29816 @@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
29817 dev->setkeycode = input_default_setkeycode;
29818
29819 dev_set_name(&dev->dev, "input%ld",
29820 - (unsigned long) atomic_inc_return(&input_no) - 1);
29821 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
29822
29823 error = device_add(&dev->dev);
29824 if (error)
29825 diff -urNp linux-2.6.32.42/drivers/input/joystick/sidewinder.c linux-2.6.32.42/drivers/input/joystick/sidewinder.c
29826 --- linux-2.6.32.42/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
29827 +++ linux-2.6.32.42/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
29828 @@ -30,6 +30,7 @@
29829 #include <linux/kernel.h>
29830 #include <linux/module.h>
29831 #include <linux/slab.h>
29832 +#include <linux/sched.h>
29833 #include <linux/init.h>
29834 #include <linux/input.h>
29835 #include <linux/gameport.h>
29836 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
29837 unsigned char buf[SW_LENGTH];
29838 int i;
29839
29840 + pax_track_stack();
29841 +
29842 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
29843
29844 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
29845 diff -urNp linux-2.6.32.42/drivers/input/joystick/xpad.c linux-2.6.32.42/drivers/input/joystick/xpad.c
29846 --- linux-2.6.32.42/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
29847 +++ linux-2.6.32.42/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
29848 @@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
29849
29850 static int xpad_led_probe(struct usb_xpad *xpad)
29851 {
29852 - static atomic_t led_seq = ATOMIC_INIT(0);
29853 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
29854 long led_no;
29855 struct xpad_led *led;
29856 struct led_classdev *led_cdev;
29857 @@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
29858 if (!led)
29859 return -ENOMEM;
29860
29861 - led_no = (long)atomic_inc_return(&led_seq) - 1;
29862 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
29863
29864 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
29865 led->xpad = xpad;
29866 diff -urNp linux-2.6.32.42/drivers/input/serio/serio.c linux-2.6.32.42/drivers/input/serio/serio.c
29867 --- linux-2.6.32.42/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
29868 +++ linux-2.6.32.42/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
29869 @@ -527,7 +527,7 @@ static void serio_release_port(struct de
29870 */
29871 static void serio_init_port(struct serio *serio)
29872 {
29873 - static atomic_t serio_no = ATOMIC_INIT(0);
29874 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
29875
29876 __module_get(THIS_MODULE);
29877
29878 @@ -536,7 +536,7 @@ static void serio_init_port(struct serio
29879 mutex_init(&serio->drv_mutex);
29880 device_initialize(&serio->dev);
29881 dev_set_name(&serio->dev, "serio%ld",
29882 - (long)atomic_inc_return(&serio_no) - 1);
29883 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
29884 serio->dev.bus = &serio_bus;
29885 serio->dev.release = serio_release_port;
29886 if (serio->parent) {
29887 diff -urNp linux-2.6.32.42/drivers/isdn/gigaset/common.c linux-2.6.32.42/drivers/isdn/gigaset/common.c
29888 --- linux-2.6.32.42/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
29889 +++ linux-2.6.32.42/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
29890 @@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
29891 cs->commands_pending = 0;
29892 cs->cur_at_seq = 0;
29893 cs->gotfwver = -1;
29894 - cs->open_count = 0;
29895 + local_set(&cs->open_count, 0);
29896 cs->dev = NULL;
29897 cs->tty = NULL;
29898 cs->tty_dev = NULL;
29899 diff -urNp linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h
29900 --- linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
29901 +++ linux-2.6.32.42/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
29902 @@ -34,6 +34,7 @@
29903 #include <linux/tty_driver.h>
29904 #include <linux/list.h>
29905 #include <asm/atomic.h>
29906 +#include <asm/local.h>
29907
29908 #define GIG_VERSION {0,5,0,0}
29909 #define GIG_COMPAT {0,4,0,0}
29910 @@ -446,7 +447,7 @@ struct cardstate {
29911 spinlock_t cmdlock;
29912 unsigned curlen, cmdbytes;
29913
29914 - unsigned open_count;
29915 + local_t open_count;
29916 struct tty_struct *tty;
29917 struct tasklet_struct if_wake_tasklet;
29918 unsigned control_state;
29919 diff -urNp linux-2.6.32.42/drivers/isdn/gigaset/interface.c linux-2.6.32.42/drivers/isdn/gigaset/interface.c
29920 --- linux-2.6.32.42/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
29921 +++ linux-2.6.32.42/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
29922 @@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
29923 return -ERESTARTSYS; // FIXME -EINTR?
29924 tty->driver_data = cs;
29925
29926 - ++cs->open_count;
29927 -
29928 - if (cs->open_count == 1) {
29929 + if (local_inc_return(&cs->open_count) == 1) {
29930 spin_lock_irqsave(&cs->lock, flags);
29931 cs->tty = tty;
29932 spin_unlock_irqrestore(&cs->lock, flags);
29933 @@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
29934
29935 if (!cs->connected)
29936 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29937 - else if (!cs->open_count)
29938 + else if (!local_read(&cs->open_count))
29939 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29940 else {
29941 - if (!--cs->open_count) {
29942 + if (!local_dec_return(&cs->open_count)) {
29943 spin_lock_irqsave(&cs->lock, flags);
29944 cs->tty = NULL;
29945 spin_unlock_irqrestore(&cs->lock, flags);
29946 @@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
29947 if (!cs->connected) {
29948 gig_dbg(DEBUG_IF, "not connected");
29949 retval = -ENODEV;
29950 - } else if (!cs->open_count)
29951 + } else if (!local_read(&cs->open_count))
29952 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29953 else {
29954 retval = 0;
29955 @@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
29956 if (!cs->connected) {
29957 gig_dbg(DEBUG_IF, "not connected");
29958 retval = -ENODEV;
29959 - } else if (!cs->open_count)
29960 + } else if (!local_read(&cs->open_count))
29961 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29962 else if (cs->mstate != MS_LOCKED) {
29963 dev_warn(cs->dev, "can't write to unlocked device\n");
29964 @@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
29965 if (!cs->connected) {
29966 gig_dbg(DEBUG_IF, "not connected");
29967 retval = -ENODEV;
29968 - } else if (!cs->open_count)
29969 + } else if (!local_read(&cs->open_count))
29970 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29971 else if (cs->mstate != MS_LOCKED) {
29972 dev_warn(cs->dev, "can't write to unlocked device\n");
29973 @@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
29974
29975 if (!cs->connected)
29976 gig_dbg(DEBUG_IF, "not connected");
29977 - else if (!cs->open_count)
29978 + else if (!local_read(&cs->open_count))
29979 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29980 else if (cs->mstate != MS_LOCKED)
29981 dev_warn(cs->dev, "can't write to unlocked device\n");
29982 @@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
29983
29984 if (!cs->connected)
29985 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29986 - else if (!cs->open_count)
29987 + else if (!local_read(&cs->open_count))
29988 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29989 else {
29990 //FIXME
29991 @@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
29992
29993 if (!cs->connected)
29994 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
29995 - else if (!cs->open_count)
29996 + else if (!local_read(&cs->open_count))
29997 dev_warn(cs->dev, "%s: device not opened\n", __func__);
29998 else {
29999 //FIXME
30000 @@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
30001 goto out;
30002 }
30003
30004 - if (!cs->open_count) {
30005 + if (!local_read(&cs->open_count)) {
30006 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30007 goto out;
30008 }
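
The gigaset changes above replace the plain 'unsigned open_count' and its bare ++/-- bookkeeping with a local_t driven through local_inc_return()/local_dec_return(), so the first-open and last-close decisions are taken directly from the value returned by a single read-modify-write. A compact userspace sketch of that pattern, using C11 atomics in place of the kernel's local_t:

/* Illustrative only: not part of the patch. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;

static void demo_open(void)
{
        if (atomic_fetch_add(&open_count, 1) + 1 == 1)
                puts("first open: attach the tty");
}

static void demo_close(void)
{
        if (atomic_fetch_sub(&open_count, 1) - 1 == 0)
                puts("last close: detach the tty");
}

int main(void)
{
        demo_open();
        demo_open();                    /* second opener: nothing to set up */
        demo_close();                   /* still one user left */
        demo_close();
        return 0;
}
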
30009 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c
30010 --- linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
30011 +++ linux-2.6.32.42/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
30012 @@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
30013 }
30014 if (left) {
30015 if (t4file->user) {
30016 - if (copy_from_user(buf, dp, left))
30017 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30018 return -EFAULT;
30019 } else {
30020 memcpy(buf, dp, left);
30021 @@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
30022 }
30023 if (left) {
30024 if (config->user) {
30025 - if (copy_from_user(buf, dp, left))
30026 + if (left > sizeof buf || copy_from_user(buf, dp, left))
30027 return -EFAULT;
30028 } else {
30029 memcpy(buf, dp, left);
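
The b1_load_t4file()/b1_load_config() hunks above put a length check in front of copy_from_user() so that a caller-supplied 'left' can never overrun the fixed on-stack buffer; the icn_writecmd() hunk further down adds the same guard. The standalone sketch below shows the shape of the check, with bounded_copy() merely standing in for copy_from_user():

/* Illustrative only: not part of the patch. */
#include <stdio.h>
#include <string.h>

static int bounded_copy(char *dst, size_t dst_size, const char *src, size_t len)
{
        if (len > dst_size)             /* the check the patch adds */
                return -1;              /* -EFAULT in the kernel version */
        memcpy(dst, src, len);
        return 0;
}

int main(void)
{
        char buf[16];
        const char user_data[64] = "firmware chunk";

        if (bounded_copy(buf, sizeof buf, user_data, sizeof user_data))
                puts("oversized chunk rejected");
        if (bounded_copy(buf, sizeof buf, user_data, 8) == 0)
                puts("small chunk copied");
        return 0;
}
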
30030 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c
30031 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
30032 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
30033 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
30034 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
30035 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
30036
30037 + pax_track_stack();
30038
30039 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
30040 {
30041 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c
30042 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
30043 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
30044 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
30045 IDI_SYNC_REQ req;
30046 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30047
30048 + pax_track_stack();
30049 +
30050 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30051
30052 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30053 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c
30054 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
30055 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
30056 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
30057 IDI_SYNC_REQ req;
30058 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30059
30060 + pax_track_stack();
30061 +
30062 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30063
30064 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30065 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c
30066 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
30067 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
30068 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
30069 IDI_SYNC_REQ req;
30070 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30071
30072 + pax_track_stack();
30073 +
30074 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30075
30076 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30077 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c
30078 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
30079 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
30080 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
30081 IDI_SYNC_REQ req;
30082 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30083
30084 + pax_track_stack();
30085 +
30086 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30087
30088 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30089 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c
30090 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
30091 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
30092 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
30093 dword d;
30094 word w;
30095
30096 + pax_track_stack();
30097 +
30098 a = plci->adapter;
30099 Id = ((word)plci->Id<<8)|a->Id;
30100 PUT_WORD(&SS_Ind[4],0x0000);
30101 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
30102 word j, n, w;
30103 dword d;
30104
30105 + pax_track_stack();
30106 +
30107
30108 for(i=0;i<8;i++) bp_parms[i].length = 0;
30109 for(i=0;i<2;i++) global_config[i].length = 0;
30110 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
30111 const byte llc3[] = {4,3,2,2,6,6,0};
30112 const byte header[] = {0,2,3,3,0,0,0};
30113
30114 + pax_track_stack();
30115 +
30116 for(i=0;i<8;i++) bp_parms[i].length = 0;
30117 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
30118 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
30119 @@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
30120 word appl_number_group_type[MAX_APPL];
30121 PLCI *auxplci;
30122
30123 + pax_track_stack();
30124 +
30125 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
30126
30127 if(!a->group_optimization_enabled)
30128 diff -urNp linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c
30129 --- linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
30130 +++ linux-2.6.32.42/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
30131 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
30132 IDI_SYNC_REQ req;
30133 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30134
30135 + pax_track_stack();
30136 +
30137 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30138
30139 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30140 diff -urNp linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c
30141 --- linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
30142 +++ linux-2.6.32.42/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
30143 @@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
30144 } iocpar;
30145 void __user *argp = (void __user *)arg;
30146
30147 + pax_track_stack();
30148 +
30149 #define name iocpar.name
30150 #define bname iocpar.bname
30151 #define iocts iocpar.iocts
30152 diff -urNp linux-2.6.32.42/drivers/isdn/icn/icn.c linux-2.6.32.42/drivers/isdn/icn/icn.c
30153 --- linux-2.6.32.42/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
30154 +++ linux-2.6.32.42/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
30155 @@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
30156 if (count > len)
30157 count = len;
30158 if (user) {
30159 - if (copy_from_user(msg, buf, count))
30160 + if (count > sizeof msg || copy_from_user(msg, buf, count))
30161 return -EFAULT;
30162 } else
30163 memcpy(msg, buf, count);
30164 diff -urNp linux-2.6.32.42/drivers/isdn/mISDN/socket.c linux-2.6.32.42/drivers/isdn/mISDN/socket.c
30165 --- linux-2.6.32.42/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
30166 +++ linux-2.6.32.42/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
30167 @@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
30168 if (dev) {
30169 struct mISDN_devinfo di;
30170
30171 + memset(&di, 0, sizeof(di));
30172 di.id = dev->id;
30173 di.Dprotocols = dev->Dprotocols;
30174 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30175 @@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
30176 if (dev) {
30177 struct mISDN_devinfo di;
30178
30179 + memset(&di, 0, sizeof(di));
30180 di.id = dev->id;
30181 di.Dprotocols = dev->Dprotocols;
30182 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30183 diff -urNp linux-2.6.32.42/drivers/isdn/sc/interrupt.c linux-2.6.32.42/drivers/isdn/sc/interrupt.c
30184 --- linux-2.6.32.42/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
30185 +++ linux-2.6.32.42/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
30186 @@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
30187 }
30188 else if(callid>=0x0000 && callid<=0x7FFF)
30189 {
30190 + int len;
30191 +
30192 pr_debug("%s: Got Incoming Call\n",
30193 sc_adapter[card]->devicename);
30194 - strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
30195 - strcpy(setup.eazmsn,
30196 - sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
30197 + len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
30198 + sizeof(setup.phone));
30199 + if (len >= sizeof(setup.phone))
30200 + continue;
30201 + len = strlcpy(setup.eazmsn,
30202 + sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30203 + sizeof(setup.eazmsn));
30204 + if (len >= sizeof(setup.eazmsn))
30205 + continue;
30206 setup.si1 = 7;
30207 setup.si2 = 0;
30208 setup.plan = 0;
30209 @@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
30210 * Handle a GetMyNumber Rsp
30211 */
30212 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
30213 - strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
30214 + strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30215 + rcvmsg.msg_data.byte_array,
30216 + sizeof(rcvmsg.msg_data.byte_array));
30217 continue;
30218 }
30219
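
The interrupt.c hunk above swaps unbounded strcpy() calls for strlcpy() plus an explicit truncation check, skipping the incoming-call record when the wire-supplied number would not fit its destination. glibc has no strlcpy(), so the sketch below carries a tiny equivalent; the bounded copy and the "did it fit?" return value are the parts that correspond to the patch:

/* Illustrative only: not part of the patch. */
#include <stdio.h>
#include <string.h>

/* minimal strlcpy() work-alike: bounded copy, returns the length it wanted */
static size_t demo_strlcpy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = len < size - 1 ? len : size - 1;

                memcpy(dst, src, n);
                dst[n] = '\0';
        }
        return len;
}

int main(void)
{
        char phone[12];
        const char *wire = "004915112345678901234";   /* hostile length */

        if (demo_strlcpy(phone, wire, sizeof(phone)) >= sizeof(phone))
                puts("number too long, record skipped");   /* the 'continue' above */
        else
                printf("dialing %s\n", phone);
        return 0;
}
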
30220 diff -urNp linux-2.6.32.42/drivers/lguest/core.c linux-2.6.32.42/drivers/lguest/core.c
30221 --- linux-2.6.32.42/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
30222 +++ linux-2.6.32.42/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
30223 @@ -91,9 +91,17 @@ static __init int map_switcher(void)
30224 * it's worked so far. The end address needs +1 because __get_vm_area
30225 * allocates an extra guard page, so we need space for that.
30226 */
30227 +
30228 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30229 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30230 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30231 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30232 +#else
30233 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30234 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30235 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30236 +#endif
30237 +
30238 if (!switcher_vma) {
30239 err = -ENOMEM;
30240 printk("lguest: could not map switcher pages high\n");
30241 @@ -118,7 +126,7 @@ static __init int map_switcher(void)
30242 * Now the Switcher is mapped at the right address, we can't fail!
30243 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
30244 */
30245 - memcpy(switcher_vma->addr, start_switcher_text,
30246 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30247 end_switcher_text - start_switcher_text);
30248
30249 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30250 diff -urNp linux-2.6.32.42/drivers/lguest/x86/core.c linux-2.6.32.42/drivers/lguest/x86/core.c
30251 --- linux-2.6.32.42/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
30252 +++ linux-2.6.32.42/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
30253 @@ -59,7 +59,7 @@ static struct {
30254 /* Offset from where switcher.S was compiled to where we've copied it */
30255 static unsigned long switcher_offset(void)
30256 {
30257 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30258 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30259 }
30260
30261 /* This cpu's struct lguest_pages. */
30262 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
30263 * These copies are pretty cheap, so we do them unconditionally: */
30264 /* Save the current Host top-level page directory.
30265 */
30266 +
30267 +#ifdef CONFIG_PAX_PER_CPU_PGD
30268 + pages->state.host_cr3 = read_cr3();
30269 +#else
30270 pages->state.host_cr3 = __pa(current->mm->pgd);
30271 +#endif
30272 +
30273 /*
30274 * Set up the Guest's page tables to see this CPU's pages (and no
30275 * other CPU's pages).
30276 @@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
30277 * compiled-in switcher code and the high-mapped copy we just made.
30278 */
30279 for (i = 0; i < IDT_ENTRIES; i++)
30280 - default_idt_entries[i] += switcher_offset();
30281 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30282
30283 /*
30284 * Set up the Switcher's per-cpu areas.
30285 @@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
30286 * it will be undisturbed when we switch. To change %cs and jump we
30287 * need this structure to feed to Intel's "lcall" instruction.
30288 */
30289 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30290 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30291 lguest_entry.segment = LGUEST_CS;
30292
30293 /*
30294 diff -urNp linux-2.6.32.42/drivers/lguest/x86/switcher_32.S linux-2.6.32.42/drivers/lguest/x86/switcher_32.S
30295 --- linux-2.6.32.42/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
30296 +++ linux-2.6.32.42/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
30297 @@ -87,6 +87,7 @@
30298 #include <asm/page.h>
30299 #include <asm/segment.h>
30300 #include <asm/lguest.h>
30301 +#include <asm/processor-flags.h>
30302
30303 // We mark the start of the code to copy
30304 // It's placed in .text tho it's never run here
30305 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30306 // Changes type when we load it: damn Intel!
30307 // For after we switch over our page tables
30308 // That entry will be read-only: we'd crash.
30309 +
30310 +#ifdef CONFIG_PAX_KERNEXEC
30311 + mov %cr0, %edx
30312 + xor $X86_CR0_WP, %edx
30313 + mov %edx, %cr0
30314 +#endif
30315 +
30316 movl $(GDT_ENTRY_TSS*8), %edx
30317 ltr %dx
30318
30319 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30320 // Let's clear it again for our return.
30321 // The GDT descriptor of the Host
30322 // Points to the table after two "size" bytes
30323 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
30324 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
30325 // Clear "used" from type field (byte 5, bit 2)
30326 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
30327 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
30328 +
30329 +#ifdef CONFIG_PAX_KERNEXEC
30330 + mov %cr0, %eax
30331 + xor $X86_CR0_WP, %eax
30332 + mov %eax, %cr0
30333 +#endif
30334
30335 // Once our page table's switched, the Guest is live!
30336 // The Host fades as we run this final step.
30337 @@ -295,13 +309,12 @@ deliver_to_host:
30338 // I consulted gcc, and it gave
30339 // These instructions, which I gladly credit:
30340 leal (%edx,%ebx,8), %eax
30341 - movzwl (%eax),%edx
30342 - movl 4(%eax), %eax
30343 - xorw %ax, %ax
30344 - orl %eax, %edx
30345 + movl 4(%eax), %edx
30346 + movw (%eax), %dx
30347 // Now the address of the handler's in %edx
30348 // We call it now: its "iret" drops us home.
30349 - jmp *%edx
30350 + ljmp $__KERNEL_CS, $1f
30351 +1: jmp *%edx
30352
30353 // Every interrupt can come to us here
30354 // But we must truly tell each apart.
30355 diff -urNp linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c
30356 --- linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
30357 +++ linux-2.6.32.42/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
30358 @@ -15,7 +15,7 @@
30359
30360 #define MAX_PMU_LEVEL 0xFF
30361
30362 -static struct backlight_ops pmu_backlight_data;
30363 +static const struct backlight_ops pmu_backlight_data;
30364 static DEFINE_SPINLOCK(pmu_backlight_lock);
30365 static int sleeping, uses_pmu_bl;
30366 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
30367 @@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
30368 return bd->props.brightness;
30369 }
30370
30371 -static struct backlight_ops pmu_backlight_data = {
30372 +static const struct backlight_ops pmu_backlight_data = {
30373 .get_brightness = pmu_backlight_get_brightness,
30374 .update_status = pmu_backlight_update_status,
30375
30376 diff -urNp linux-2.6.32.42/drivers/macintosh/via-pmu.c linux-2.6.32.42/drivers/macintosh/via-pmu.c
30377 --- linux-2.6.32.42/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
30378 +++ linux-2.6.32.42/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
30379 @@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
30380 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
30381 }
30382
30383 -static struct platform_suspend_ops pmu_pm_ops = {
30384 +static const struct platform_suspend_ops pmu_pm_ops = {
30385 .enter = powerbook_sleep,
30386 .valid = pmu_sleep_valid,
30387 };
30388 diff -urNp linux-2.6.32.42/drivers/md/dm.c linux-2.6.32.42/drivers/md/dm.c
30389 --- linux-2.6.32.42/drivers/md/dm.c 2011-03-27 14:31:47.000000000 -0400
30390 +++ linux-2.6.32.42/drivers/md/dm.c 2011-05-04 17:56:28.000000000 -0400
30391 @@ -163,9 +163,9 @@ struct mapped_device {
30392 /*
30393 * Event handling.
30394 */
30395 - atomic_t event_nr;
30396 + atomic_unchecked_t event_nr;
30397 wait_queue_head_t eventq;
30398 - atomic_t uevent_seq;
30399 + atomic_unchecked_t uevent_seq;
30400 struct list_head uevent_list;
30401 spinlock_t uevent_lock; /* Protect access to uevent_list */
30402
30403 @@ -1770,8 +1770,8 @@ static struct mapped_device *alloc_dev(i
30404 rwlock_init(&md->map_lock);
30405 atomic_set(&md->holders, 1);
30406 atomic_set(&md->open_count, 0);
30407 - atomic_set(&md->event_nr, 0);
30408 - atomic_set(&md->uevent_seq, 0);
30409 + atomic_set_unchecked(&md->event_nr, 0);
30410 + atomic_set_unchecked(&md->uevent_seq, 0);
30411 INIT_LIST_HEAD(&md->uevent_list);
30412 spin_lock_init(&md->uevent_lock);
30413
30414 @@ -1921,7 +1921,7 @@ static void event_callback(void *context
30415
30416 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
30417
30418 - atomic_inc(&md->event_nr);
30419 + atomic_inc_unchecked(&md->event_nr);
30420 wake_up(&md->eventq);
30421 }
30422
30423 @@ -2556,18 +2556,18 @@ void dm_kobject_uevent(struct mapped_dev
30424
30425 uint32_t dm_next_uevent_seq(struct mapped_device *md)
30426 {
30427 - return atomic_add_return(1, &md->uevent_seq);
30428 + return atomic_add_return_unchecked(1, &md->uevent_seq);
30429 }
30430
30431 uint32_t dm_get_event_nr(struct mapped_device *md)
30432 {
30433 - return atomic_read(&md->event_nr);
30434 + return atomic_read_unchecked(&md->event_nr);
30435 }
30436
30437 int dm_wait_event(struct mapped_device *md, int event_nr)
30438 {
30439 return wait_event_interruptible(md->eventq,
30440 - (event_nr != atomic_read(&md->event_nr)));
30441 + (event_nr != atomic_read_unchecked(&md->event_nr)));
30442 }
30443
30444 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
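The dm.c hunk above, and many of the hunks that follow (md.c, raid1/raid10/raid5, the SGI GRU statistics), convert atomic_t counters that are plain statistics or sequence numbers to atomic_unchecked_t, the PaX variant that is exempt from reference-count overflow detection: wrap-around of an event counter is harmless, while wrap-around of a lifetime refcount is a bug that should trap. A rough standalone sketch of that distinction, using C11 atomics and made-up helper names rather than the kernel implementation:

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* "checked" increment: a refcount-style counter must not overflow */
    static int checked_inc(atomic_int *v)
    {
        int old = atomic_fetch_add(v, 1);
        if (old == INT_MAX) {                 /* wrapped: treat as a fatal refcount bug */
            fprintf(stderr, "refcount overflow\n");
            abort();
        }
        return old + 1;
    }

    /* "unchecked" increment: a pure statistic may wrap silently */
    static int unchecked_inc(atomic_int *v)
    {
        return atomic_fetch_add(v, 1) + 1;
    }

    int main(void)
    {
        atomic_int refs = 1, events = 0;
        checked_inc(&refs);       /* object lifetime counter: overflow must trap  */
        unchecked_inc(&events);   /* event/sequence counter: wrap-around is fine  */
        printf("refs=%d events=%d\n", atomic_load(&refs), atomic_load(&events));
        return 0;
    }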
30445 diff -urNp linux-2.6.32.42/drivers/md/dm-ioctl.c linux-2.6.32.42/drivers/md/dm-ioctl.c
30446 --- linux-2.6.32.42/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
30447 +++ linux-2.6.32.42/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
30448 @@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
30449 cmd == DM_LIST_VERSIONS_CMD)
30450 return 0;
30451
30452 - if ((cmd == DM_DEV_CREATE_CMD)) {
30453 + if (cmd == DM_DEV_CREATE_CMD) {
30454 if (!*param->name) {
30455 DMWARN("name not supplied when creating device");
30456 return -EINVAL;
30457 diff -urNp linux-2.6.32.42/drivers/md/dm-raid1.c linux-2.6.32.42/drivers/md/dm-raid1.c
30458 --- linux-2.6.32.42/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
30459 +++ linux-2.6.32.42/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
30460 @@ -41,7 +41,7 @@ enum dm_raid1_error {
30461
30462 struct mirror {
30463 struct mirror_set *ms;
30464 - atomic_t error_count;
30465 + atomic_unchecked_t error_count;
30466 unsigned long error_type;
30467 struct dm_dev *dev;
30468 sector_t offset;
30469 @@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
30470 * simple way to tell if a device has encountered
30471 * errors.
30472 */
30473 - atomic_inc(&m->error_count);
30474 + atomic_inc_unchecked(&m->error_count);
30475
30476 if (test_and_set_bit(error_type, &m->error_type))
30477 return;
30478 @@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
30479 }
30480
30481 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
30482 - if (!atomic_read(&new->error_count)) {
30483 + if (!atomic_read_unchecked(&new->error_count)) {
30484 set_default_mirror(new);
30485 break;
30486 }
30487 @@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
30488 struct mirror *m = get_default_mirror(ms);
30489
30490 do {
30491 - if (likely(!atomic_read(&m->error_count)))
30492 + if (likely(!atomic_read_unchecked(&m->error_count)))
30493 return m;
30494
30495 if (m-- == ms->mirror)
30496 @@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
30497 {
30498 struct mirror *default_mirror = get_default_mirror(m->ms);
30499
30500 - return !atomic_read(&default_mirror->error_count);
30501 + return !atomic_read_unchecked(&default_mirror->error_count);
30502 }
30503
30504 static int mirror_available(struct mirror_set *ms, struct bio *bio)
30505 @@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
30506 */
30507 if (likely(region_in_sync(ms, region, 1)))
30508 m = choose_mirror(ms, bio->bi_sector);
30509 - else if (m && atomic_read(&m->error_count))
30510 + else if (m && atomic_read_unchecked(&m->error_count))
30511 m = NULL;
30512
30513 if (likely(m))
30514 @@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
30515 }
30516
30517 ms->mirror[mirror].ms = ms;
30518 - atomic_set(&(ms->mirror[mirror].error_count), 0);
30519 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
30520 ms->mirror[mirror].error_type = 0;
30521 ms->mirror[mirror].offset = offset;
30522
30523 @@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
30524 */
30525 static char device_status_char(struct mirror *m)
30526 {
30527 - if (!atomic_read(&(m->error_count)))
30528 + if (!atomic_read_unchecked(&(m->error_count)))
30529 return 'A';
30530
30531 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
30532 diff -urNp linux-2.6.32.42/drivers/md/dm-stripe.c linux-2.6.32.42/drivers/md/dm-stripe.c
30533 --- linux-2.6.32.42/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
30534 +++ linux-2.6.32.42/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
30535 @@ -20,7 +20,7 @@ struct stripe {
30536 struct dm_dev *dev;
30537 sector_t physical_start;
30538
30539 - atomic_t error_count;
30540 + atomic_unchecked_t error_count;
30541 };
30542
30543 struct stripe_c {
30544 @@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
30545 kfree(sc);
30546 return r;
30547 }
30548 - atomic_set(&(sc->stripe[i].error_count), 0);
30549 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
30550 }
30551
30552 ti->private = sc;
30553 @@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
30554 DMEMIT("%d ", sc->stripes);
30555 for (i = 0; i < sc->stripes; i++) {
30556 DMEMIT("%s ", sc->stripe[i].dev->name);
30557 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
30558 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
30559 'D' : 'A';
30560 }
30561 buffer[i] = '\0';
30562 @@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
30563 */
30564 for (i = 0; i < sc->stripes; i++)
30565 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
30566 - atomic_inc(&(sc->stripe[i].error_count));
30567 - if (atomic_read(&(sc->stripe[i].error_count)) <
30568 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
30569 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
30570 DM_IO_ERROR_THRESHOLD)
30571 queue_work(kstriped, &sc->kstriped_ws);
30572 }
30573 diff -urNp linux-2.6.32.42/drivers/md/dm-sysfs.c linux-2.6.32.42/drivers/md/dm-sysfs.c
30574 --- linux-2.6.32.42/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
30575 +++ linux-2.6.32.42/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
30576 @@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
30577 NULL,
30578 };
30579
30580 -static struct sysfs_ops dm_sysfs_ops = {
30581 +static const struct sysfs_ops dm_sysfs_ops = {
30582 .show = dm_attr_show,
30583 };
30584
30585 diff -urNp linux-2.6.32.42/drivers/md/dm-table.c linux-2.6.32.42/drivers/md/dm-table.c
30586 --- linux-2.6.32.42/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
30587 +++ linux-2.6.32.42/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
30588 @@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
30589 if (!dev_size)
30590 return 0;
30591
30592 - if ((start >= dev_size) || (start + len > dev_size)) {
30593 + if ((start >= dev_size) || (len > dev_size - start)) {
30594 DMWARN("%s: %s too small for target: "
30595 "start=%llu, len=%llu, dev_size=%llu",
30596 dm_device_name(ti->table->md), bdevname(bdev, b),
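The dm-table.c change above rewrites `start + len > dev_size` as `len > dev_size - start`. With unsigned sector counts the original sum can wrap around and appear to be in range; the rewritten form cannot wrap, since the first clause already guarantees `start < dev_size`. A small sketch of the failure mode, using made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t dev_size = 1000;                 /* device length in sectors (example) */
        uint64_t start    = 10;
        uint64_t len      = UINT64_MAX - 5;       /* hostile or corrupt length */

        /* original form: start + len wraps and the bogus range looks valid */
        int rejects_old = (start >= dev_size) || (start + len > dev_size);

        /* patched form: no wrap, the oversized len is rejected */
        int rejects_new = (start >= dev_size) || (len > dev_size - start);

        printf("old check rejects: %d, new check rejects: %d\n", rejects_old, rejects_new);
        return 0;
    }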
30597 diff -urNp linux-2.6.32.42/drivers/md/md.c linux-2.6.32.42/drivers/md/md.c
30598 --- linux-2.6.32.42/drivers/md/md.c 2011-06-25 12:55:34.000000000 -0400
30599 +++ linux-2.6.32.42/drivers/md/md.c 2011-06-25 12:56:37.000000000 -0400
30600 @@ -153,10 +153,10 @@ static int start_readonly;
30601 * start build, activate spare
30602 */
30603 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
30604 -static atomic_t md_event_count;
30605 +static atomic_unchecked_t md_event_count;
30606 void md_new_event(mddev_t *mddev)
30607 {
30608 - atomic_inc(&md_event_count);
30609 + atomic_inc_unchecked(&md_event_count);
30610 wake_up(&md_event_waiters);
30611 }
30612 EXPORT_SYMBOL_GPL(md_new_event);
30613 @@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
30614 */
30615 static void md_new_event_inintr(mddev_t *mddev)
30616 {
30617 - atomic_inc(&md_event_count);
30618 + atomic_inc_unchecked(&md_event_count);
30619 wake_up(&md_event_waiters);
30620 }
30621
30622 @@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
30623
30624 rdev->preferred_minor = 0xffff;
30625 rdev->data_offset = le64_to_cpu(sb->data_offset);
30626 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
30627 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
30628
30629 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
30630 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
30631 @@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
30632 else
30633 sb->resync_offset = cpu_to_le64(0);
30634
30635 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
30636 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
30637
30638 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
30639 sb->size = cpu_to_le64(mddev->dev_sectors);
30640 @@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
30641 static ssize_t
30642 errors_show(mdk_rdev_t *rdev, char *page)
30643 {
30644 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
30645 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
30646 }
30647
30648 static ssize_t
30649 @@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
30650 char *e;
30651 unsigned long n = simple_strtoul(buf, &e, 10);
30652 if (*buf && (*e == 0 || *e == '\n')) {
30653 - atomic_set(&rdev->corrected_errors, n);
30654 + atomic_set_unchecked(&rdev->corrected_errors, n);
30655 return len;
30656 }
30657 return -EINVAL;
30658 @@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
30659 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
30660 kfree(rdev);
30661 }
30662 -static struct sysfs_ops rdev_sysfs_ops = {
30663 +static const struct sysfs_ops rdev_sysfs_ops = {
30664 .show = rdev_attr_show,
30665 .store = rdev_attr_store,
30666 };
30667 @@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
30668 rdev->data_offset = 0;
30669 rdev->sb_events = 0;
30670 atomic_set(&rdev->nr_pending, 0);
30671 - atomic_set(&rdev->read_errors, 0);
30672 - atomic_set(&rdev->corrected_errors, 0);
30673 + atomic_set_unchecked(&rdev->read_errors, 0);
30674 + atomic_set_unchecked(&rdev->corrected_errors, 0);
30675
30676 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
30677 if (!size) {
30678 @@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
30679 kfree(mddev);
30680 }
30681
30682 -static struct sysfs_ops md_sysfs_ops = {
30683 +static const struct sysfs_ops md_sysfs_ops = {
30684 .show = md_attr_show,
30685 .store = md_attr_store,
30686 };
30687 @@ -4474,7 +4474,8 @@ out:
30688 err = 0;
30689 blk_integrity_unregister(disk);
30690 md_new_event(mddev);
30691 - sysfs_notify_dirent(mddev->sysfs_state);
30692 + if (mddev->sysfs_state)
30693 + sysfs_notify_dirent(mddev->sysfs_state);
30694 return err;
30695 }
30696
30697 @@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
30698
30699 spin_unlock(&pers_lock);
30700 seq_printf(seq, "\n");
30701 - mi->event = atomic_read(&md_event_count);
30702 + mi->event = atomic_read_unchecked(&md_event_count);
30703 return 0;
30704 }
30705 if (v == (void*)2) {
30706 @@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
30707 chunk_kb ? "KB" : "B");
30708 if (bitmap->file) {
30709 seq_printf(seq, ", file: ");
30710 - seq_path(seq, &bitmap->file->f_path, " \t\n");
30711 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
30712 }
30713
30714 seq_printf(seq, "\n");
30715 @@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
30716 else {
30717 struct seq_file *p = file->private_data;
30718 p->private = mi;
30719 - mi->event = atomic_read(&md_event_count);
30720 + mi->event = atomic_read_unchecked(&md_event_count);
30721 }
30722 return error;
30723 }
30724 @@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
30725 /* always allow read */
30726 mask = POLLIN | POLLRDNORM;
30727
30728 - if (mi->event != atomic_read(&md_event_count))
30729 + if (mi->event != atomic_read_unchecked(&md_event_count))
30730 mask |= POLLERR | POLLPRI;
30731 return mask;
30732 }
30733 @@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
30734 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
30735 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
30736 (int)part_stat_read(&disk->part0, sectors[1]) -
30737 - atomic_read(&disk->sync_io);
30738 + atomic_read_unchecked(&disk->sync_io);
30739 /* sync IO will cause sync_io to increase before the disk_stats
30740 * as sync_io is counted when a request starts, and
30741 * disk_stats is counted when it completes.
30742 diff -urNp linux-2.6.32.42/drivers/md/md.h linux-2.6.32.42/drivers/md/md.h
30743 --- linux-2.6.32.42/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
30744 +++ linux-2.6.32.42/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
30745 @@ -94,10 +94,10 @@ struct mdk_rdev_s
30746 * only maintained for arrays that
30747 * support hot removal
30748 */
30749 - atomic_t read_errors; /* number of consecutive read errors that
30750 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
30751 * we have tried to ignore.
30752 */
30753 - atomic_t corrected_errors; /* number of corrected read errors,
30754 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
30755 * for reporting to userspace and storing
30756 * in superblock.
30757 */
30758 @@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
30759
30760 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
30761 {
30762 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
30763 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
30764 }
30765
30766 struct mdk_personality
30767 diff -urNp linux-2.6.32.42/drivers/md/raid10.c linux-2.6.32.42/drivers/md/raid10.c
30768 --- linux-2.6.32.42/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
30769 +++ linux-2.6.32.42/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
30770 @@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
30771 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
30772 set_bit(R10BIO_Uptodate, &r10_bio->state);
30773 else {
30774 - atomic_add(r10_bio->sectors,
30775 + atomic_add_unchecked(r10_bio->sectors,
30776 &conf->mirrors[d].rdev->corrected_errors);
30777 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
30778 md_error(r10_bio->mddev,
30779 @@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
30780 test_bit(In_sync, &rdev->flags)) {
30781 atomic_inc(&rdev->nr_pending);
30782 rcu_read_unlock();
30783 - atomic_add(s, &rdev->corrected_errors);
30784 + atomic_add_unchecked(s, &rdev->corrected_errors);
30785 if (sync_page_io(rdev->bdev,
30786 r10_bio->devs[sl].addr +
30787 sect + rdev->data_offset,
30788 diff -urNp linux-2.6.32.42/drivers/md/raid1.c linux-2.6.32.42/drivers/md/raid1.c
30789 --- linux-2.6.32.42/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
30790 +++ linux-2.6.32.42/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
30791 @@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
30792 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
30793 continue;
30794 rdev = conf->mirrors[d].rdev;
30795 - atomic_add(s, &rdev->corrected_errors);
30796 + atomic_add_unchecked(s, &rdev->corrected_errors);
30797 if (sync_page_io(rdev->bdev,
30798 sect + rdev->data_offset,
30799 s<<9,
30800 @@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
30801 /* Well, this device is dead */
30802 md_error(mddev, rdev);
30803 else {
30804 - atomic_add(s, &rdev->corrected_errors);
30805 + atomic_add_unchecked(s, &rdev->corrected_errors);
30806 printk(KERN_INFO
30807 "raid1:%s: read error corrected "
30808 "(%d sectors at %llu on %s)\n",
30809 diff -urNp linux-2.6.32.42/drivers/md/raid5.c linux-2.6.32.42/drivers/md/raid5.c
30810 --- linux-2.6.32.42/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
30811 +++ linux-2.6.32.42/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
30812 @@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
30813 bi->bi_next = NULL;
30814 if ((rw & WRITE) &&
30815 test_bit(R5_ReWrite, &sh->dev[i].flags))
30816 - atomic_add(STRIPE_SECTORS,
30817 + atomic_add_unchecked(STRIPE_SECTORS,
30818 &rdev->corrected_errors);
30819 generic_make_request(bi);
30820 } else {
30821 @@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
30822 clear_bit(R5_ReadError, &sh->dev[i].flags);
30823 clear_bit(R5_ReWrite, &sh->dev[i].flags);
30824 }
30825 - if (atomic_read(&conf->disks[i].rdev->read_errors))
30826 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
30827 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
30828 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
30829 } else {
30830 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
30831 int retry = 0;
30832 rdev = conf->disks[i].rdev;
30833
30834 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
30835 - atomic_inc(&rdev->read_errors);
30836 + atomic_inc_unchecked(&rdev->read_errors);
30837 if (conf->mddev->degraded >= conf->max_degraded)
30838 printk_rl(KERN_WARNING
30839 "raid5:%s: read error not correctable "
30840 @@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
30841 (unsigned long long)(sh->sector
30842 + rdev->data_offset),
30843 bdn);
30844 - else if (atomic_read(&rdev->read_errors)
30845 + else if (atomic_read_unchecked(&rdev->read_errors)
30846 > conf->max_nr_stripes)
30847 printk(KERN_WARNING
30848 "raid5:%s: Too many read errors, failing device %s.\n",
30849 @@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
30850 sector_t r_sector;
30851 struct stripe_head sh2;
30852
30853 + pax_track_stack();
30854
30855 chunk_offset = sector_div(new_sector, sectors_per_chunk);
30856 stripe = new_sector;
30857 diff -urNp linux-2.6.32.42/drivers/media/common/saa7146_hlp.c linux-2.6.32.42/drivers/media/common/saa7146_hlp.c
30858 --- linux-2.6.32.42/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
30859 +++ linux-2.6.32.42/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
30860 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
30861
30862 int x[32], y[32], w[32], h[32];
30863
30864 + pax_track_stack();
30865 +
30866 /* clear out memory */
30867 memset(&line_list[0], 0x00, sizeof(u32)*32);
30868 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
30869 diff -urNp linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
30870 --- linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
30871 +++ linux-2.6.32.42/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
30872 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
30873 u8 buf[HOST_LINK_BUF_SIZE];
30874 int i;
30875
30876 + pax_track_stack();
30877 +
30878 dprintk("%s\n", __func__);
30879
30880 /* check if we have space for a link buf in the rx_buffer */
30881 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
30882 unsigned long timeout;
30883 int written;
30884
30885 + pax_track_stack();
30886 +
30887 dprintk("%s\n", __func__);
30888
30889 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
30890 diff -urNp linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c
30891 --- linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
30892 +++ linux-2.6.32.42/drivers/media/dvb/dvb-core/dvbdev.c 2011-04-17 15:56:46.000000000 -0400
30893 @@ -191,6 +191,7 @@ int dvb_register_device(struct dvb_adapt
30894 const struct dvb_device *template, void *priv, int type)
30895 {
30896 struct dvb_device *dvbdev;
30897 + /* cannot be const */
30898 struct file_operations *dvbdevfops;
30899 struct device *clsdev;
30900 int minor;
30901 diff -urNp linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c
30902 --- linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
30903 +++ linux-2.6.32.42/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
30904 @@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
30905
30906 u8 buf[260];
30907
30908 + pax_track_stack();
30909 +
30910 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
30911 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
30912
30913 diff -urNp linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c
30914 --- linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
30915 +++ linux-2.6.32.42/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
30916 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
30917 u8 tudata[585];
30918 int i;
30919
30920 + pax_track_stack();
30921 +
30922 dprintk("Firmware is %zd bytes\n",fw->size);
30923
30924 /* Get eprom data */
30925 diff -urNp linux-2.6.32.42/drivers/media/radio/radio-cadet.c linux-2.6.32.42/drivers/media/radio/radio-cadet.c
30926 --- linux-2.6.32.42/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
30927 +++ linux-2.6.32.42/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
30928 @@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
30929 while (i < count && dev->rdsin != dev->rdsout)
30930 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
30931
30932 - if (copy_to_user(data, readbuf, i))
30933 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
30934 return -EFAULT;
30935 return i;
30936 }
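The radio-cadet hunk above refuses the copy_to_user() when the byte count has somehow grown past the on-stack readbuf, returning -EFAULT instead of leaking adjacent stack memory to userspace. The same defensive pattern in standalone C; the helper name and values are invented for illustration:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* copy at most n bytes out of an internal buffer, refusing impossible sizes */
    static int bounded_copy(char *dst, size_t dst_size,
                            const char *buf, size_t buf_size, size_t n)
    {
        if (n > buf_size || n > dst_size)         /* sanity-check before touching memory */
            return -EFAULT;
        memcpy(dst, buf, n);
        return 0;
    }

    int main(void)
    {
        char readbuf[8] = "RDSDATA";
        char out[8];

        printf("ok copy: %d\n", bounded_copy(out, sizeof(out), readbuf, sizeof(readbuf), 4));
        printf("oversized copy: %d\n", bounded_copy(out, sizeof(out), readbuf, sizeof(readbuf), 64));
        return 0;
    }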
30937 diff -urNp linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c
30938 --- linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
30939 +++ linux-2.6.32.42/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
30940 @@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
30941
30942 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
30943
30944 -static atomic_t cx18_instance = ATOMIC_INIT(0);
30945 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
30946
30947 /* Parameter declarations */
30948 static int cardtype[CX18_MAX_CARDS];
30949 @@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
30950 struct i2c_client c;
30951 u8 eedata[256];
30952
30953 + pax_track_stack();
30954 +
30955 memset(&c, 0, sizeof(c));
30956 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
30957 c.adapter = &cx->i2c_adap[0];
30958 @@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
30959 struct cx18 *cx;
30960
30961 /* FIXME - module parameter arrays constrain max instances */
30962 - i = atomic_inc_return(&cx18_instance) - 1;
30963 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
30964 if (i >= CX18_MAX_CARDS) {
30965 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
30966 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
30967 diff -urNp linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c
30968 --- linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
30969 +++ linux-2.6.32.42/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
30970 @@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
30971 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
30972
30973 /* ivtv instance counter */
30974 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
30975 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
30976
30977 /* Parameter declarations */
30978 static int cardtype[IVTV_MAX_CARDS];
30979 diff -urNp linux-2.6.32.42/drivers/media/video/omap24xxcam.c linux-2.6.32.42/drivers/media/video/omap24xxcam.c
30980 --- linux-2.6.32.42/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
30981 +++ linux-2.6.32.42/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
30982 @@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
30983 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
30984
30985 do_gettimeofday(&vb->ts);
30986 - vb->field_count = atomic_add_return(2, &fh->field_count);
30987 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
30988 if (csr & csr_error) {
30989 vb->state = VIDEOBUF_ERROR;
30990 if (!atomic_read(&fh->cam->in_reset)) {
30991 diff -urNp linux-2.6.32.42/drivers/media/video/omap24xxcam.h linux-2.6.32.42/drivers/media/video/omap24xxcam.h
30992 --- linux-2.6.32.42/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
30993 +++ linux-2.6.32.42/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
30994 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
30995 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
30996 struct videobuf_queue vbq;
30997 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
30998 - atomic_t field_count; /* field counter for videobuf_buffer */
30999 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
31000 /* accessing cam here doesn't need serialisation: it's constant */
31001 struct omap24xxcam_device *cam;
31002 };
31003 diff -urNp linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31004 --- linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
31005 +++ linux-2.6.32.42/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
31006 @@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
31007 u8 *eeprom;
31008 struct tveeprom tvdata;
31009
31010 + pax_track_stack();
31011 +
31012 memset(&tvdata,0,sizeof(tvdata));
31013
31014 eeprom = pvr2_eeprom_fetch(hdw);
31015 diff -urNp linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c
31016 --- linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
31017 +++ linux-2.6.32.42/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
31018 @@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
31019 unsigned char localPAT[256];
31020 unsigned char localPMT[256];
31021
31022 + pax_track_stack();
31023 +
31024 /* Set video format - must be done first as it resets other settings */
31025 set_reg8(client, 0x41, h->video_format);
31026
31027 diff -urNp linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c
31028 --- linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
31029 +++ linux-2.6.32.42/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
31030 @@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
31031 wait_queue_head_t *q = 0;
31032 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31033
31034 + pax_track_stack();
31035 +
31036 /* While any outstand message on the bus exists... */
31037 do {
31038
31039 @@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
31040 u8 tmp[512];
31041 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31042
31043 + pax_track_stack();
31044 +
31045 while (loop) {
31046
31047 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
31048 diff -urNp linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c
31049 --- linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
31050 +++ linux-2.6.32.42/drivers/media/video/usbvideo/konicawc.c 2011-04-17 15:56:46.000000000 -0400
31051 @@ -225,7 +225,7 @@ static void konicawc_register_input(stru
31052 int error;
31053
31054 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
31055 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31056 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31057
31058 cam->input = input_dev = input_allocate_device();
31059 if (!input_dev) {
31060 diff -urNp linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c
31061 --- linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
31062 +++ linux-2.6.32.42/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
31063 @@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
31064 int error;
31065
31066 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
31067 - strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31068 + strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31069
31070 cam->input = input_dev = input_allocate_device();
31071 if (!input_dev) {
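The two USB webcam hunks above replace strncat() with strlcat(). strncat's third argument is the maximum number of bytes to append, not the size of the destination, so passing sizeof(dest) can overrun a nearly full buffer; strlcat takes the total destination size and truncates safely. A userspace comparison with a minimal local strlcat-style helper (so the example does not depend on libbsd); the string contents are illustrative:

    #include <stdio.h>
    #include <string.h>

    /* minimal strlcat-style helper: 'size' is the TOTAL destination size,
     * dst must already be a NUL-terminated string shorter than size */
    static size_t demo_strlcat(char *dst, const char *src, size_t size)
    {
        size_t dlen = strlen(dst);
        size_t slen = strlen(src);
        size_t room = size - dlen - 1;
        size_t n    = (slen < room) ? slen : room;

        memcpy(dst + dlen, src, n);
        dst[dlen + n] = '\0';
        return dlen + slen;                       /* length it tried to create */
    }

    int main(void)
    {
        char phys[16] = "usb-0000:00:1d";         /* nearly full, like input_physname */

        /* strncat(phys, "/input0", sizeof(phys)) would write past the end here;
         * the strlcat form truncates inside the buffer instead. */
        demo_strlcat(phys, "/input0", sizeof(phys));
        printf("%s\n", phys);
        return 0;
    }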
31072 diff -urNp linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c
31073 --- linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
31074 +++ linux-2.6.32.42/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
31075 @@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
31076 unsigned char rv, gv, bv;
31077 static unsigned char *Y, *U, *V;
31078
31079 + pax_track_stack();
31080 +
31081 frame = usbvision->curFrame;
31082 imageSize = frame->frmwidth * frame->frmheight;
31083 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
31084 diff -urNp linux-2.6.32.42/drivers/media/video/v4l2-device.c linux-2.6.32.42/drivers/media/video/v4l2-device.c
31085 --- linux-2.6.32.42/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
31086 +++ linux-2.6.32.42/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
31087 @@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
31088 EXPORT_SYMBOL_GPL(v4l2_device_register);
31089
31090 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
31091 - atomic_t *instance)
31092 + atomic_unchecked_t *instance)
31093 {
31094 - int num = atomic_inc_return(instance) - 1;
31095 + int num = atomic_inc_return_unchecked(instance) - 1;
31096 int len = strlen(basename);
31097
31098 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
31099 diff -urNp linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c
31100 --- linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
31101 +++ linux-2.6.32.42/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
31102 @@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
31103 {
31104 struct videobuf_queue q;
31105
31106 + pax_track_stack();
31107 +
31108 /* Required to make generic handler to call __videobuf_alloc */
31109 q.int_ops = &sg_ops;
31110
31111 diff -urNp linux-2.6.32.42/drivers/message/fusion/mptbase.c linux-2.6.32.42/drivers/message/fusion/mptbase.c
31112 --- linux-2.6.32.42/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
31113 +++ linux-2.6.32.42/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
31114 @@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
31115 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31116 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31117
31118 +#ifdef CONFIG_GRKERNSEC_HIDESYM
31119 + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31120 + NULL, NULL);
31121 +#else
31122 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31123 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31124 +#endif
31125 +
31126 /*
31127 * Rounding UP to nearest 4-kB boundary here...
31128 */
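The mptbase.c hunk above is the GRKERNSEC_HIDESYM treatment: when kernel pointer hiding is enabled, the /proc report prints NULL in place of real kernel addresses, closing an address infoleak to unprivileged readers. A toy sketch of conditionally censoring pointers in a report; the function name is made up, only the config macro is taken from the patch:

    #include <stdio.h>

    #define CONFIG_GRKERNSEC_HIDESYM 1            /* pretend the option is enabled */

    static void report_frames(void *frames, void *frames_dma)
    {
    #ifdef CONFIG_GRKERNSEC_HIDESYM
        /* hide the real addresses from the reader-visible report */
        printf("RequestFrames @ %p (Dma @ %p)\n", (void *)NULL, (void *)NULL);
        (void)frames; (void)frames_dma;
    #else
        printf("RequestFrames @ %p (Dma @ %p)\n", frames, frames_dma);
    #endif
    }

    int main(void)
    {
        int dummy;
        report_frames(&dummy, &dummy);
        return 0;
    }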
31129 diff -urNp linux-2.6.32.42/drivers/message/fusion/mptsas.c linux-2.6.32.42/drivers/message/fusion/mptsas.c
31130 --- linux-2.6.32.42/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
31131 +++ linux-2.6.32.42/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
31132 @@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
31133 return 0;
31134 }
31135
31136 +static inline void
31137 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31138 +{
31139 + if (phy_info->port_details) {
31140 + phy_info->port_details->rphy = rphy;
31141 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31142 + ioc->name, rphy));
31143 + }
31144 +
31145 + if (rphy) {
31146 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31147 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31148 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31149 + ioc->name, rphy, rphy->dev.release));
31150 + }
31151 +}
31152 +
31153 /* no mutex */
31154 static void
31155 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31156 @@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
31157 return NULL;
31158 }
31159
31160 -static inline void
31161 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31162 -{
31163 - if (phy_info->port_details) {
31164 - phy_info->port_details->rphy = rphy;
31165 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31166 - ioc->name, rphy));
31167 - }
31168 -
31169 - if (rphy) {
31170 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31171 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31172 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31173 - ioc->name, rphy, rphy->dev.release));
31174 - }
31175 -}
31176 -
31177 static inline struct sas_port *
31178 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31179 {
31180 diff -urNp linux-2.6.32.42/drivers/message/fusion/mptscsih.c linux-2.6.32.42/drivers/message/fusion/mptscsih.c
31181 --- linux-2.6.32.42/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
31182 +++ linux-2.6.32.42/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
31183 @@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31184
31185 h = shost_priv(SChost);
31186
31187 - if (h) {
31188 - if (h->info_kbuf == NULL)
31189 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31190 - return h->info_kbuf;
31191 - h->info_kbuf[0] = '\0';
31192 + if (!h)
31193 + return NULL;
31194
31195 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31196 - h->info_kbuf[size-1] = '\0';
31197 - }
31198 + if (h->info_kbuf == NULL)
31199 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31200 + return h->info_kbuf;
31201 + h->info_kbuf[0] = '\0';
31202 +
31203 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31204 + h->info_kbuf[size-1] = '\0';
31205
31206 return h->info_kbuf;
31207 }
31208 diff -urNp linux-2.6.32.42/drivers/message/i2o/i2o_config.c linux-2.6.32.42/drivers/message/i2o/i2o_config.c
31209 --- linux-2.6.32.42/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
31210 +++ linux-2.6.32.42/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
31211 @@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
31212 struct i2o_message *msg;
31213 unsigned int iop;
31214
31215 + pax_track_stack();
31216 +
31217 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
31218 return -EFAULT;
31219
31220 diff -urNp linux-2.6.32.42/drivers/message/i2o/i2o_proc.c linux-2.6.32.42/drivers/message/i2o/i2o_proc.c
31221 --- linux-2.6.32.42/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
31222 +++ linux-2.6.32.42/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
31223 @@ -259,13 +259,6 @@ static char *scsi_devices[] = {
31224 "Array Controller Device"
31225 };
31226
31227 -static char *chtostr(u8 * chars, int n)
31228 -{
31229 - char tmp[256];
31230 - tmp[0] = 0;
31231 - return strncat(tmp, (char *)chars, n);
31232 -}
31233 -
31234 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31235 char *group)
31236 {
31237 @@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
31238
31239 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31240 seq_printf(seq, "%-#8x", ddm_table.module_id);
31241 - seq_printf(seq, "%-29s",
31242 - chtostr(ddm_table.module_name_version, 28));
31243 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31244 seq_printf(seq, "%9d ", ddm_table.data_size);
31245 seq_printf(seq, "%8d", ddm_table.code_size);
31246
31247 @@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
31248
31249 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31250 seq_printf(seq, "%-#8x", dst->module_id);
31251 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31252 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31253 + seq_printf(seq, "%-.28s", dst->module_name_version);
31254 + seq_printf(seq, "%-.8s", dst->date);
31255 seq_printf(seq, "%8d ", dst->module_size);
31256 seq_printf(seq, "%8d ", dst->mpb_size);
31257 seq_printf(seq, "0x%04x", dst->module_flags);
31258 @@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
31259 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31260 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31261 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31262 - seq_printf(seq, "Vendor info : %s\n",
31263 - chtostr((u8 *) (work32 + 2), 16));
31264 - seq_printf(seq, "Product info : %s\n",
31265 - chtostr((u8 *) (work32 + 6), 16));
31266 - seq_printf(seq, "Description : %s\n",
31267 - chtostr((u8 *) (work32 + 10), 16));
31268 - seq_printf(seq, "Product rev. : %s\n",
31269 - chtostr((u8 *) (work32 + 14), 8));
31270 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31271 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31272 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31273 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31274
31275 seq_printf(seq, "Serial number : ");
31276 print_serial_number(seq, (u8 *) (work32 + 16),
31277 @@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
31278 }
31279
31280 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31281 - seq_printf(seq, "Module name : %s\n",
31282 - chtostr(result.module_name, 24));
31283 - seq_printf(seq, "Module revision : %s\n",
31284 - chtostr(result.module_rev, 8));
31285 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
31286 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31287
31288 seq_printf(seq, "Serial number : ");
31289 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31290 @@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
31291 return 0;
31292 }
31293
31294 - seq_printf(seq, "Device name : %s\n",
31295 - chtostr(result.device_name, 64));
31296 - seq_printf(seq, "Service name : %s\n",
31297 - chtostr(result.service_name, 64));
31298 - seq_printf(seq, "Physical name : %s\n",
31299 - chtostr(result.physical_location, 64));
31300 - seq_printf(seq, "Instance number : %s\n",
31301 - chtostr(result.instance_number, 4));
31302 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
31303 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
31304 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31305 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31306
31307 return 0;
31308 }
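The i2o_proc.c hunks above delete the chtostr() helper, which strncat'd caller data into a 256-byte stack buffer and returned a pointer into that soon-to-be-dead stack frame, and instead bound the output with a printf precision such as "%.28s". The precision caps how many bytes are read, so fixed-width fields that are not NUL-terminated are printed safely. A small standalone illustration with an invented field value:

    #include <stdio.h>

    int main(void)
    {
        /* a fixed-size field that is NOT guaranteed to be NUL-terminated,
         * like module_name_version in the I2O tables */
        char field[8] = { 'D', 'D', 'M', '-', 'v', '1', '.', '0' };

        /* "%.8s" reads at most 8 bytes, so the missing terminator is harmless */
        printf("Module name : %.8s\n", field);
        printf("Module name : %.*s\n", (int)sizeof(field), field);   /* same, runtime width */
        return 0;
    }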
31309 diff -urNp linux-2.6.32.42/drivers/message/i2o/iop.c linux-2.6.32.42/drivers/message/i2o/iop.c
31310 --- linux-2.6.32.42/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
31311 +++ linux-2.6.32.42/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
31312 @@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
31313
31314 spin_lock_irqsave(&c->context_list_lock, flags);
31315
31316 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31317 - atomic_inc(&c->context_list_counter);
31318 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31319 + atomic_inc_unchecked(&c->context_list_counter);
31320
31321 - entry->context = atomic_read(&c->context_list_counter);
31322 + entry->context = atomic_read_unchecked(&c->context_list_counter);
31323
31324 list_add(&entry->list, &c->context_list);
31325
31326 @@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
31327
31328 #if BITS_PER_LONG == 64
31329 spin_lock_init(&c->context_list_lock);
31330 - atomic_set(&c->context_list_counter, 0);
31331 + atomic_set_unchecked(&c->context_list_counter, 0);
31332 INIT_LIST_HEAD(&c->context_list);
31333 #endif
31334
31335 diff -urNp linux-2.6.32.42/drivers/mfd/wm8350-i2c.c linux-2.6.32.42/drivers/mfd/wm8350-i2c.c
31336 --- linux-2.6.32.42/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
31337 +++ linux-2.6.32.42/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
31338 @@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
31339 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
31340 int ret;
31341
31342 + pax_track_stack();
31343 +
31344 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
31345 return -EINVAL;
31346
31347 diff -urNp linux-2.6.32.42/drivers/misc/kgdbts.c linux-2.6.32.42/drivers/misc/kgdbts.c
31348 --- linux-2.6.32.42/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
31349 +++ linux-2.6.32.42/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
31350 @@ -118,7 +118,7 @@
31351 } while (0)
31352 #define MAX_CONFIG_LEN 40
31353
31354 -static struct kgdb_io kgdbts_io_ops;
31355 +static const struct kgdb_io kgdbts_io_ops;
31356 static char get_buf[BUFMAX];
31357 static int get_buf_cnt;
31358 static char put_buf[BUFMAX];
31359 @@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
31360 module_put(THIS_MODULE);
31361 }
31362
31363 -static struct kgdb_io kgdbts_io_ops = {
31364 +static const struct kgdb_io kgdbts_io_ops = {
31365 .name = "kgdbts",
31366 .read_char = kgdbts_get_char,
31367 .write_char = kgdbts_put_char,
31368 diff -urNp linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c
31369 --- linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
31370 +++ linux-2.6.32.42/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
31371 @@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
31372
31373 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
31374 {
31375 - atomic_long_inc(&mcs_op_statistics[op].count);
31376 - atomic_long_add(clks, &mcs_op_statistics[op].total);
31377 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
31378 + atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
31379 if (mcs_op_statistics[op].max < clks)
31380 mcs_op_statistics[op].max = clks;
31381 }
31382 diff -urNp linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c
31383 --- linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
31384 +++ linux-2.6.32.42/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
31385 @@ -32,9 +32,9 @@
31386
31387 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
31388
31389 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
31390 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
31391 {
31392 - unsigned long val = atomic_long_read(v);
31393 + unsigned long val = atomic_long_read_unchecked(v);
31394
31395 if (val)
31396 seq_printf(s, "%16lu %s\n", val, id);
31397 @@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
31398 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
31399
31400 for (op = 0; op < mcsop_last; op++) {
31401 - count = atomic_long_read(&mcs_op_statistics[op].count);
31402 - total = atomic_long_read(&mcs_op_statistics[op].total);
31403 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
31404 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
31405 max = mcs_op_statistics[op].max;
31406 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
31407 count ? total / count : 0, max);
31408 diff -urNp linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h
31409 --- linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
31410 +++ linux-2.6.32.42/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
31411 @@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
31412 * GRU statistics.
31413 */
31414 struct gru_stats_s {
31415 - atomic_long_t vdata_alloc;
31416 - atomic_long_t vdata_free;
31417 - atomic_long_t gts_alloc;
31418 - atomic_long_t gts_free;
31419 - atomic_long_t vdata_double_alloc;
31420 - atomic_long_t gts_double_allocate;
31421 - atomic_long_t assign_context;
31422 - atomic_long_t assign_context_failed;
31423 - atomic_long_t free_context;
31424 - atomic_long_t load_user_context;
31425 - atomic_long_t load_kernel_context;
31426 - atomic_long_t lock_kernel_context;
31427 - atomic_long_t unlock_kernel_context;
31428 - atomic_long_t steal_user_context;
31429 - atomic_long_t steal_kernel_context;
31430 - atomic_long_t steal_context_failed;
31431 - atomic_long_t nopfn;
31432 - atomic_long_t break_cow;
31433 - atomic_long_t asid_new;
31434 - atomic_long_t asid_next;
31435 - atomic_long_t asid_wrap;
31436 - atomic_long_t asid_reuse;
31437 - atomic_long_t intr;
31438 - atomic_long_t intr_mm_lock_failed;
31439 - atomic_long_t call_os;
31440 - atomic_long_t call_os_offnode_reference;
31441 - atomic_long_t call_os_check_for_bug;
31442 - atomic_long_t call_os_wait_queue;
31443 - atomic_long_t user_flush_tlb;
31444 - atomic_long_t user_unload_context;
31445 - atomic_long_t user_exception;
31446 - atomic_long_t set_context_option;
31447 - atomic_long_t migrate_check;
31448 - atomic_long_t migrated_retarget;
31449 - atomic_long_t migrated_unload;
31450 - atomic_long_t migrated_unload_delay;
31451 - atomic_long_t migrated_nopfn_retarget;
31452 - atomic_long_t migrated_nopfn_unload;
31453 - atomic_long_t tlb_dropin;
31454 - atomic_long_t tlb_dropin_fail_no_asid;
31455 - atomic_long_t tlb_dropin_fail_upm;
31456 - atomic_long_t tlb_dropin_fail_invalid;
31457 - atomic_long_t tlb_dropin_fail_range_active;
31458 - atomic_long_t tlb_dropin_fail_idle;
31459 - atomic_long_t tlb_dropin_fail_fmm;
31460 - atomic_long_t tlb_dropin_fail_no_exception;
31461 - atomic_long_t tlb_dropin_fail_no_exception_war;
31462 - atomic_long_t tfh_stale_on_fault;
31463 - atomic_long_t mmu_invalidate_range;
31464 - atomic_long_t mmu_invalidate_page;
31465 - atomic_long_t mmu_clear_flush_young;
31466 - atomic_long_t flush_tlb;
31467 - atomic_long_t flush_tlb_gru;
31468 - atomic_long_t flush_tlb_gru_tgh;
31469 - atomic_long_t flush_tlb_gru_zero_asid;
31470 -
31471 - atomic_long_t copy_gpa;
31472 -
31473 - atomic_long_t mesq_receive;
31474 - atomic_long_t mesq_receive_none;
31475 - atomic_long_t mesq_send;
31476 - atomic_long_t mesq_send_failed;
31477 - atomic_long_t mesq_noop;
31478 - atomic_long_t mesq_send_unexpected_error;
31479 - atomic_long_t mesq_send_lb_overflow;
31480 - atomic_long_t mesq_send_qlimit_reached;
31481 - atomic_long_t mesq_send_amo_nacked;
31482 - atomic_long_t mesq_send_put_nacked;
31483 - atomic_long_t mesq_qf_not_full;
31484 - atomic_long_t mesq_qf_locked;
31485 - atomic_long_t mesq_qf_noop_not_full;
31486 - atomic_long_t mesq_qf_switch_head_failed;
31487 - atomic_long_t mesq_qf_unexpected_error;
31488 - atomic_long_t mesq_noop_unexpected_error;
31489 - atomic_long_t mesq_noop_lb_overflow;
31490 - atomic_long_t mesq_noop_qlimit_reached;
31491 - atomic_long_t mesq_noop_amo_nacked;
31492 - atomic_long_t mesq_noop_put_nacked;
31493 + atomic_long_unchecked_t vdata_alloc;
31494 + atomic_long_unchecked_t vdata_free;
31495 + atomic_long_unchecked_t gts_alloc;
31496 + atomic_long_unchecked_t gts_free;
31497 + atomic_long_unchecked_t vdata_double_alloc;
31498 + atomic_long_unchecked_t gts_double_allocate;
31499 + atomic_long_unchecked_t assign_context;
31500 + atomic_long_unchecked_t assign_context_failed;
31501 + atomic_long_unchecked_t free_context;
31502 + atomic_long_unchecked_t load_user_context;
31503 + atomic_long_unchecked_t load_kernel_context;
31504 + atomic_long_unchecked_t lock_kernel_context;
31505 + atomic_long_unchecked_t unlock_kernel_context;
31506 + atomic_long_unchecked_t steal_user_context;
31507 + atomic_long_unchecked_t steal_kernel_context;
31508 + atomic_long_unchecked_t steal_context_failed;
31509 + atomic_long_unchecked_t nopfn;
31510 + atomic_long_unchecked_t break_cow;
31511 + atomic_long_unchecked_t asid_new;
31512 + atomic_long_unchecked_t asid_next;
31513 + atomic_long_unchecked_t asid_wrap;
31514 + atomic_long_unchecked_t asid_reuse;
31515 + atomic_long_unchecked_t intr;
31516 + atomic_long_unchecked_t intr_mm_lock_failed;
31517 + atomic_long_unchecked_t call_os;
31518 + atomic_long_unchecked_t call_os_offnode_reference;
31519 + atomic_long_unchecked_t call_os_check_for_bug;
31520 + atomic_long_unchecked_t call_os_wait_queue;
31521 + atomic_long_unchecked_t user_flush_tlb;
31522 + atomic_long_unchecked_t user_unload_context;
31523 + atomic_long_unchecked_t user_exception;
31524 + atomic_long_unchecked_t set_context_option;
31525 + atomic_long_unchecked_t migrate_check;
31526 + atomic_long_unchecked_t migrated_retarget;
31527 + atomic_long_unchecked_t migrated_unload;
31528 + atomic_long_unchecked_t migrated_unload_delay;
31529 + atomic_long_unchecked_t migrated_nopfn_retarget;
31530 + atomic_long_unchecked_t migrated_nopfn_unload;
31531 + atomic_long_unchecked_t tlb_dropin;
31532 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
31533 + atomic_long_unchecked_t tlb_dropin_fail_upm;
31534 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
31535 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
31536 + atomic_long_unchecked_t tlb_dropin_fail_idle;
31537 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
31538 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
31539 + atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
31540 + atomic_long_unchecked_t tfh_stale_on_fault;
31541 + atomic_long_unchecked_t mmu_invalidate_range;
31542 + atomic_long_unchecked_t mmu_invalidate_page;
31543 + atomic_long_unchecked_t mmu_clear_flush_young;
31544 + atomic_long_unchecked_t flush_tlb;
31545 + atomic_long_unchecked_t flush_tlb_gru;
31546 + atomic_long_unchecked_t flush_tlb_gru_tgh;
31547 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
31548 +
31549 + atomic_long_unchecked_t copy_gpa;
31550 +
31551 + atomic_long_unchecked_t mesq_receive;
31552 + atomic_long_unchecked_t mesq_receive_none;
31553 + atomic_long_unchecked_t mesq_send;
31554 + atomic_long_unchecked_t mesq_send_failed;
31555 + atomic_long_unchecked_t mesq_noop;
31556 + atomic_long_unchecked_t mesq_send_unexpected_error;
31557 + atomic_long_unchecked_t mesq_send_lb_overflow;
31558 + atomic_long_unchecked_t mesq_send_qlimit_reached;
31559 + atomic_long_unchecked_t mesq_send_amo_nacked;
31560 + atomic_long_unchecked_t mesq_send_put_nacked;
31561 + atomic_long_unchecked_t mesq_qf_not_full;
31562 + atomic_long_unchecked_t mesq_qf_locked;
31563 + atomic_long_unchecked_t mesq_qf_noop_not_full;
31564 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
31565 + atomic_long_unchecked_t mesq_qf_unexpected_error;
31566 + atomic_long_unchecked_t mesq_noop_unexpected_error;
31567 + atomic_long_unchecked_t mesq_noop_lb_overflow;
31568 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
31569 + atomic_long_unchecked_t mesq_noop_amo_nacked;
31570 + atomic_long_unchecked_t mesq_noop_put_nacked;
31571
31572 };
31573
31574 @@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
31575 cchop_deallocate, tghop_invalidate, mcsop_last};
31576
31577 struct mcs_op_statistic {
31578 - atomic_long_t count;
31579 - atomic_long_t total;
31580 + atomic_long_unchecked_t count;
31581 + atomic_long_unchecked_t total;
31582 unsigned long max;
31583 };
31584
31585 @@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
31586
31587 #define STAT(id) do { \
31588 if (gru_options & OPT_STATS) \
31589 - atomic_long_inc(&gru_stats.id); \
31590 + atomic_long_inc_unchecked(&gru_stats.id); \
31591 } while (0)
31592
31593 #ifdef CONFIG_SGI_GRU_DEBUG
31594 diff -urNp linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c
31595 --- linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
31596 +++ linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
31597 @@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
31598 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
31599 unsigned long timeo = jiffies + HZ;
31600
31601 + pax_track_stack();
31602 +
31603 /* Prevent setting state FL_SYNCING for chip in suspended state. */
31604 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
31605 goto sleep;
31606 @@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
31607 unsigned long initial_adr;
31608 int initial_len = len;
31609
31610 + pax_track_stack();
31611 +
31612 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
31613 adr += chip->start;
31614 initial_adr = adr;
31615 @@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
31616 int retries = 3;
31617 int ret;
31618
31619 + pax_track_stack();
31620 +
31621 adr += chip->start;
31622
31623 retry:
31624 diff -urNp linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c
31625 --- linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
31626 +++ linux-2.6.32.42/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
31627 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
31628 unsigned long cmd_addr;
31629 struct cfi_private *cfi = map->fldrv_priv;
31630
31631 + pax_track_stack();
31632 +
31633 adr += chip->start;
31634
31635 /* Ensure cmd read/writes are aligned. */
31636 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
31637 DECLARE_WAITQUEUE(wait, current);
31638 int wbufsize, z;
31639
31640 + pax_track_stack();
31641 +
31642 /* M58LW064A requires bus alignment for buffer wriets -- saw */
31643 if (adr & (map_bankwidth(map)-1))
31644 return -EINVAL;
31645 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
31646 DECLARE_WAITQUEUE(wait, current);
31647 int ret = 0;
31648
31649 + pax_track_stack();
31650 +
31651 adr += chip->start;
31652
31653 /* Let's determine this according to the interleave only once */
31654 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
31655 unsigned long timeo = jiffies + HZ;
31656 DECLARE_WAITQUEUE(wait, current);
31657
31658 + pax_track_stack();
31659 +
31660 adr += chip->start;
31661
31662 /* Let's determine this according to the interleave only once */
31663 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
31664 unsigned long timeo = jiffies + HZ;
31665 DECLARE_WAITQUEUE(wait, current);
31666
31667 + pax_track_stack();
31668 +
31669 adr += chip->start;
31670
31671 /* Let's determine this according to the interleave only once */
31672 diff -urNp linux-2.6.32.42/drivers/mtd/devices/doc2000.c linux-2.6.32.42/drivers/mtd/devices/doc2000.c
31673 --- linux-2.6.32.42/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
31674 +++ linux-2.6.32.42/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
31675 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
31676
31677 /* The ECC will not be calculated correctly if less than 512 is written */
31678 /* DBB-
31679 - if (len != 0x200 && eccbuf)
31680 + if (len != 0x200)
31681 printk(KERN_WARNING
31682 "ECC needs a full sector write (adr: %lx size %lx)\n",
31683 (long) to, (long) len);
31684 diff -urNp linux-2.6.32.42/drivers/mtd/devices/doc2001.c linux-2.6.32.42/drivers/mtd/devices/doc2001.c
31685 --- linux-2.6.32.42/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
31686 +++ linux-2.6.32.42/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
31687 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
31688 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
31689
31690 /* Don't allow read past end of device */
31691 - if (from >= this->totlen)
31692 + if (from >= this->totlen || !len)
31693 return -EINVAL;
31694
31695 /* Don't allow a single read to cross a 512-byte block boundary */
31696 diff -urNp linux-2.6.32.42/drivers/mtd/ftl.c linux-2.6.32.42/drivers/mtd/ftl.c
31697 --- linux-2.6.32.42/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
31698 +++ linux-2.6.32.42/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
31699 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
31700 loff_t offset;
31701 uint16_t srcunitswap = cpu_to_le16(srcunit);
31702
31703 + pax_track_stack();
31704 +
31705 eun = &part->EUNInfo[srcunit];
31706 xfer = &part->XferInfo[xferunit];
31707 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
31708 diff -urNp linux-2.6.32.42/drivers/mtd/inftlcore.c linux-2.6.32.42/drivers/mtd/inftlcore.c
31709 --- linux-2.6.32.42/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
31710 +++ linux-2.6.32.42/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
31711 @@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
31712 struct inftl_oob oob;
31713 size_t retlen;
31714
31715 + pax_track_stack();
31716 +
31717 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
31718 "pending=%d)\n", inftl, thisVUC, pendingblock);
31719
31720 diff -urNp linux-2.6.32.42/drivers/mtd/inftlmount.c linux-2.6.32.42/drivers/mtd/inftlmount.c
31721 --- linux-2.6.32.42/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
31722 +++ linux-2.6.32.42/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
31723 @@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
31724 struct INFTLPartition *ip;
31725 size_t retlen;
31726
31727 + pax_track_stack();
31728 +
31729 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
31730
31731 /*
31732 diff -urNp linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c
31733 --- linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
31734 +++ linux-2.6.32.42/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
31735 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
31736 {
31737 map_word pfow_val[4];
31738
31739 + pax_track_stack();
31740 +
31741 /* Check identification string */
31742 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
31743 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
31744 diff -urNp linux-2.6.32.42/drivers/mtd/mtdchar.c linux-2.6.32.42/drivers/mtd/mtdchar.c
31745 --- linux-2.6.32.42/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
31746 +++ linux-2.6.32.42/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
31747 @@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
31748 u_long size;
31749 struct mtd_info_user info;
31750
31751 + pax_track_stack();
31752 +
31753 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
31754
31755 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
31756 diff -urNp linux-2.6.32.42/drivers/mtd/nftlcore.c linux-2.6.32.42/drivers/mtd/nftlcore.c
31757 --- linux-2.6.32.42/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
31758 +++ linux-2.6.32.42/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
31759 @@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
31760 int inplace = 1;
31761 size_t retlen;
31762
31763 + pax_track_stack();
31764 +
31765 memset(BlockMap, 0xff, sizeof(BlockMap));
31766 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
31767
31768 diff -urNp linux-2.6.32.42/drivers/mtd/nftlmount.c linux-2.6.32.42/drivers/mtd/nftlmount.c
31769 --- linux-2.6.32.42/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
31770 +++ linux-2.6.32.42/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
31771 @@ -23,6 +23,7 @@
31772 #include <asm/errno.h>
31773 #include <linux/delay.h>
31774 #include <linux/slab.h>
31775 +#include <linux/sched.h>
31776 #include <linux/mtd/mtd.h>
31777 #include <linux/mtd/nand.h>
31778 #include <linux/mtd/nftl.h>
31779 @@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
31780 struct mtd_info *mtd = nftl->mbd.mtd;
31781 unsigned int i;
31782
31783 + pax_track_stack();
31784 +
31785 /* Assume logical EraseSize == physical erasesize for starting the scan.
31786 We'll sort it out later if we find a MediaHeader which says otherwise */
31787 /* Actually, we won't. The new DiskOnChip driver has already scanned
31788 diff -urNp linux-2.6.32.42/drivers/mtd/ubi/build.c linux-2.6.32.42/drivers/mtd/ubi/build.c
31789 --- linux-2.6.32.42/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
31790 +++ linux-2.6.32.42/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
31791 @@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
31792 static int __init bytes_str_to_int(const char *str)
31793 {
31794 char *endp;
31795 - unsigned long result;
31796 + unsigned long result, scale = 1;
31797
31798 result = simple_strtoul(str, &endp, 0);
31799 if (str == endp || result >= INT_MAX) {
31800 @@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
31801
31802 switch (*endp) {
31803 case 'G':
31804 - result *= 1024;
31805 + scale *= 1024;
31806 case 'M':
31807 - result *= 1024;
31808 + scale *= 1024;
31809 case 'K':
31810 - result *= 1024;
31811 + scale *= 1024;
31812 if (endp[1] == 'i' && endp[2] == 'B')
31813 endp += 2;
31814 case '\0':
31815 @@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
31816 return -EINVAL;
31817 }
31818
31819 - return result;
31820 + if ((intoverflow_t)result*scale >= INT_MAX) {
31821 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
31822 + str);
31823 + return -EINVAL;
31824 + }
31825 +
31826 + return result*scale;
31827 }
31828
31829 /**
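The bytes_str_to_int() hunk above stops multiplying the parsed value in place for the K/M/G suffixes: it accumulates the multiplier in a separate scale variable and checks the product in a wider type before returning, so an oversized module-parameter string is rejected instead of silently wrapping past INT_MAX (the patch performs that check with the PaX-specific intoverflow_t). A minimal standalone sketch of the same guard, substituting unsigned long long for the wide type and using invented names:

    #include <limits.h>
    #include <stdio.h>

    /* Illustrative only: parse a byte count with an optional K/M/G suffix,
     * rejecting any result that would not fit in an int. */
    static int parse_bytes(unsigned int value, char suffix)
    {
        unsigned long scale = 1;

        switch (suffix) {
        case 'G': scale *= 1024; /* fall through */
        case 'M': scale *= 1024; /* fall through */
        case 'K': scale *= 1024; /* fall through */
        case '\0': break;
        default:  return -1;
        }

        /* Range-check in a wider type before committing the result. */
        if ((unsigned long long)value * scale >= INT_MAX)
            return -1;

        return (int)(value * scale);
    }

    int main(void)
    {
        printf("%d\n", parse_bytes(8, 'M'));            /* 8388608 */
        printf("%d\n", parse_bytes(1024u * 1024, 'G')); /* -1: would overflow */
        return 0;
    }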
31830 diff -urNp linux-2.6.32.42/drivers/net/bnx2.c linux-2.6.32.42/drivers/net/bnx2.c
31831 --- linux-2.6.32.42/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
31832 +++ linux-2.6.32.42/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
31833 @@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
31834 int rc = 0;
31835 u32 magic, csum;
31836
31837 + pax_track_stack();
31838 +
31839 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
31840 goto test_nvram_done;
31841
31842 diff -urNp linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c
31843 --- linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
31844 +++ linux-2.6.32.42/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
31845 @@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
31846 int i, addr, ret;
31847 struct t3_vpd vpd;
31848
31849 + pax_track_stack();
31850 +
31851 /*
31852 * Card information is normally at VPD_BASE but some early cards had
31853 * it at 0.
31854 diff -urNp linux-2.6.32.42/drivers/net/e1000e/82571.c linux-2.6.32.42/drivers/net/e1000e/82571.c
31855 --- linux-2.6.32.42/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
31856 +++ linux-2.6.32.42/drivers/net/e1000e/82571.c 2011-04-17 15:56:46.000000000 -0400
31857 @@ -212,6 +212,7 @@ static s32 e1000_init_mac_params_82571(s
31858 {
31859 struct e1000_hw *hw = &adapter->hw;
31860 struct e1000_mac_info *mac = &hw->mac;
31861 + /* cannot be const */
31862 struct e1000_mac_operations *func = &mac->ops;
31863 u32 swsm = 0;
31864 u32 swsm2 = 0;
31865 @@ -1656,7 +1657,7 @@ static void e1000_clear_hw_cntrs_82571(s
31866 temp = er32(ICRXDMTC);
31867 }
31868
31869 -static struct e1000_mac_operations e82571_mac_ops = {
31870 +static const struct e1000_mac_operations e82571_mac_ops = {
31871 /* .check_mng_mode: mac type dependent */
31872 /* .check_for_link: media type dependent */
31873 .id_led_init = e1000e_id_led_init,
31874 @@ -1674,7 +1675,7 @@ static struct e1000_mac_operations e8257
31875 .setup_led = e1000e_setup_led_generic,
31876 };
31877
31878 -static struct e1000_phy_operations e82_phy_ops_igp = {
31879 +static const struct e1000_phy_operations e82_phy_ops_igp = {
31880 .acquire_phy = e1000_get_hw_semaphore_82571,
31881 .check_reset_block = e1000e_check_reset_block_generic,
31882 .commit_phy = NULL,
31883 @@ -1691,7 +1692,7 @@ static struct e1000_phy_operations e82_p
31884 .cfg_on_link_up = NULL,
31885 };
31886
31887 -static struct e1000_phy_operations e82_phy_ops_m88 = {
31888 +static const struct e1000_phy_operations e82_phy_ops_m88 = {
31889 .acquire_phy = e1000_get_hw_semaphore_82571,
31890 .check_reset_block = e1000e_check_reset_block_generic,
31891 .commit_phy = e1000e_phy_sw_reset,
31892 @@ -1708,7 +1709,7 @@ static struct e1000_phy_operations e82_p
31893 .cfg_on_link_up = NULL,
31894 };
31895
31896 -static struct e1000_phy_operations e82_phy_ops_bm = {
31897 +static const struct e1000_phy_operations e82_phy_ops_bm = {
31898 .acquire_phy = e1000_get_hw_semaphore_82571,
31899 .check_reset_block = e1000e_check_reset_block_generic,
31900 .commit_phy = e1000e_phy_sw_reset,
31901 @@ -1725,7 +1726,7 @@ static struct e1000_phy_operations e82_p
31902 .cfg_on_link_up = NULL,
31903 };
31904
31905 -static struct e1000_nvm_operations e82571_nvm_ops = {
31906 +static const struct e1000_nvm_operations e82571_nvm_ops = {
31907 .acquire_nvm = e1000_acquire_nvm_82571,
31908 .read_nvm = e1000e_read_nvm_eerd,
31909 .release_nvm = e1000_release_nvm_82571,
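This 82571 hunk is the first instance of a pattern repeated across the e1000e, igb, iwlwifi, ibmveth/iseries, parisc and wireless-debugfs hunks below: statically defined tables of function pointers (the *_mac_ops / *_phy_ops / *_nvm_ops structures, iwl_ops, sysfs_ops, embedded file_operations) become const, and the structures that reference them hold const pointers, so the tables are placed in read-only data and a runtime memory-corruption bug cannot redirect their entries. A tiny sketch of the pattern with invented names:

    #include <stdio.h>

    struct nvm_ops {
        int (*read)(unsigned int offset);              /* function-pointer table */
        int (*write)(unsigned int offset, int value);
    };

    static int dummy_read(unsigned int offset)             { return (int)offset; }
    static int dummy_write(unsigned int offset, int value) { (void)offset; return value; }

    /* const: the table lands in .rodata, so its pointers cannot be overwritten. */
    static const struct nvm_ops dummy_nvm_ops = {
        .read  = dummy_read,
        .write = dummy_write,
    };

    struct device_info {
        const struct nvm_ops *nvm;   /* the user of the table matches the const */
    };

    int main(void)
    {
        struct device_info dev = { .nvm = &dummy_nvm_ops };

        printf("%d\n", dev.nvm->read(7));
        return 0;
    }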
31910 diff -urNp linux-2.6.32.42/drivers/net/e1000e/e1000.h linux-2.6.32.42/drivers/net/e1000e/e1000.h
31911 --- linux-2.6.32.42/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
31912 +++ linux-2.6.32.42/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
31913 @@ -375,9 +375,9 @@ struct e1000_info {
31914 u32 pba;
31915 u32 max_hw_frame_size;
31916 s32 (*get_variants)(struct e1000_adapter *);
31917 - struct e1000_mac_operations *mac_ops;
31918 - struct e1000_phy_operations *phy_ops;
31919 - struct e1000_nvm_operations *nvm_ops;
31920 + const struct e1000_mac_operations *mac_ops;
31921 + const struct e1000_phy_operations *phy_ops;
31922 + const struct e1000_nvm_operations *nvm_ops;
31923 };
31924
31925 /* hardware capability, feature, and workaround flags */
31926 diff -urNp linux-2.6.32.42/drivers/net/e1000e/es2lan.c linux-2.6.32.42/drivers/net/e1000e/es2lan.c
31927 --- linux-2.6.32.42/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
31928 +++ linux-2.6.32.42/drivers/net/e1000e/es2lan.c 2011-04-17 15:56:46.000000000 -0400
31929 @@ -207,6 +207,7 @@ static s32 e1000_init_mac_params_80003es
31930 {
31931 struct e1000_hw *hw = &adapter->hw;
31932 struct e1000_mac_info *mac = &hw->mac;
31933 + /* cannot be const */
31934 struct e1000_mac_operations *func = &mac->ops;
31935
31936 /* Set media type */
31937 @@ -1365,7 +1366,7 @@ static void e1000_clear_hw_cntrs_80003es
31938 temp = er32(ICRXDMTC);
31939 }
31940
31941 -static struct e1000_mac_operations es2_mac_ops = {
31942 +static const struct e1000_mac_operations es2_mac_ops = {
31943 .id_led_init = e1000e_id_led_init,
31944 .check_mng_mode = e1000e_check_mng_mode_generic,
31945 /* check_for_link dependent on media type */
31946 @@ -1383,7 +1384,7 @@ static struct e1000_mac_operations es2_m
31947 .setup_led = e1000e_setup_led_generic,
31948 };
31949
31950 -static struct e1000_phy_operations es2_phy_ops = {
31951 +static const struct e1000_phy_operations es2_phy_ops = {
31952 .acquire_phy = e1000_acquire_phy_80003es2lan,
31953 .check_reset_block = e1000e_check_reset_block_generic,
31954 .commit_phy = e1000e_phy_sw_reset,
31955 @@ -1400,7 +1401,7 @@ static struct e1000_phy_operations es2_p
31956 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
31957 };
31958
31959 -static struct e1000_nvm_operations es2_nvm_ops = {
31960 +static const struct e1000_nvm_operations es2_nvm_ops = {
31961 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
31962 .read_nvm = e1000e_read_nvm_eerd,
31963 .release_nvm = e1000_release_nvm_80003es2lan,
31964 diff -urNp linux-2.6.32.42/drivers/net/e1000e/hw.h linux-2.6.32.42/drivers/net/e1000e/hw.h
31965 --- linux-2.6.32.42/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
31966 +++ linux-2.6.32.42/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
31967 @@ -756,34 +756,34 @@ struct e1000_mac_operations {
31968
31969 /* Function pointers for the PHY. */
31970 struct e1000_phy_operations {
31971 - s32 (*acquire_phy)(struct e1000_hw *);
31972 - s32 (*check_polarity)(struct e1000_hw *);
31973 - s32 (*check_reset_block)(struct e1000_hw *);
31974 - s32 (*commit_phy)(struct e1000_hw *);
31975 - s32 (*force_speed_duplex)(struct e1000_hw *);
31976 - s32 (*get_cfg_done)(struct e1000_hw *hw);
31977 - s32 (*get_cable_length)(struct e1000_hw *);
31978 - s32 (*get_phy_info)(struct e1000_hw *);
31979 - s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
31980 - s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
31981 - void (*release_phy)(struct e1000_hw *);
31982 - s32 (*reset_phy)(struct e1000_hw *);
31983 - s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
31984 - s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
31985 - s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
31986 - s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
31987 - s32 (*cfg_on_link_up)(struct e1000_hw *);
31988 + s32 (* acquire_phy)(struct e1000_hw *);
31989 + s32 (* check_polarity)(struct e1000_hw *);
31990 + s32 (* check_reset_block)(struct e1000_hw *);
31991 + s32 (* commit_phy)(struct e1000_hw *);
31992 + s32 (* force_speed_duplex)(struct e1000_hw *);
31993 + s32 (* get_cfg_done)(struct e1000_hw *hw);
31994 + s32 (* get_cable_length)(struct e1000_hw *);
31995 + s32 (* get_phy_info)(struct e1000_hw *);
31996 + s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
31997 + s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
31998 + void (* release_phy)(struct e1000_hw *);
31999 + s32 (* reset_phy)(struct e1000_hw *);
32000 + s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
32001 + s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
32002 + s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
32003 + s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
32004 + s32 (* cfg_on_link_up)(struct e1000_hw *);
32005 };
32006
32007 /* Function pointers for the NVM. */
32008 struct e1000_nvm_operations {
32009 - s32 (*acquire_nvm)(struct e1000_hw *);
32010 - s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
32011 - void (*release_nvm)(struct e1000_hw *);
32012 - s32 (*update_nvm)(struct e1000_hw *);
32013 - s32 (*valid_led_default)(struct e1000_hw *, u16 *);
32014 - s32 (*validate_nvm)(struct e1000_hw *);
32015 - s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
32016 + s32 (* const acquire_nvm)(struct e1000_hw *);
32017 + s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
32018 + void (* const release_nvm)(struct e1000_hw *);
32019 + s32 (* const update_nvm)(struct e1000_hw *);
32020 + s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
32021 + s32 (* const validate_nvm)(struct e1000_hw *);
32022 + s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
32023 };
32024
32025 struct e1000_mac_info {
32026 diff -urNp linux-2.6.32.42/drivers/net/e1000e/ich8lan.c linux-2.6.32.42/drivers/net/e1000e/ich8lan.c
32027 --- linux-2.6.32.42/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
32028 +++ linux-2.6.32.42/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:32.000000000 -0400
32029 @@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
32030 }
32031 }
32032
32033 -static struct e1000_mac_operations ich8_mac_ops = {
32034 +static const struct e1000_mac_operations ich8_mac_ops = {
32035 .id_led_init = e1000e_id_led_init,
32036 .check_mng_mode = e1000_check_mng_mode_ich8lan,
32037 .check_for_link = e1000_check_for_copper_link_ich8lan,
32038 @@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
32039 /* id_led_init dependent on mac type */
32040 };
32041
32042 -static struct e1000_phy_operations ich8_phy_ops = {
32043 +static const struct e1000_phy_operations ich8_phy_ops = {
32044 .acquire_phy = e1000_acquire_swflag_ich8lan,
32045 .check_reset_block = e1000_check_reset_block_ich8lan,
32046 .commit_phy = NULL,
32047 @@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
32048 .write_phy_reg = e1000e_write_phy_reg_igp,
32049 };
32050
32051 -static struct e1000_nvm_operations ich8_nvm_ops = {
32052 +static const struct e1000_nvm_operations ich8_nvm_ops = {
32053 .acquire_nvm = e1000_acquire_nvm_ich8lan,
32054 .read_nvm = e1000_read_nvm_ich8lan,
32055 .release_nvm = e1000_release_nvm_ich8lan,
32056 diff -urNp linux-2.6.32.42/drivers/net/hamradio/6pack.c linux-2.6.32.42/drivers/net/hamradio/6pack.c
32057 --- linux-2.6.32.42/drivers/net/hamradio/6pack.c 2011-03-27 14:31:47.000000000 -0400
32058 +++ linux-2.6.32.42/drivers/net/hamradio/6pack.c 2011-05-16 21:46:57.000000000 -0400
32059 @@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
32060 unsigned char buf[512];
32061 int count1;
32062
32063 + pax_track_stack();
32064 +
32065 if (!count)
32066 return;
32067
32068 diff -urNp linux-2.6.32.42/drivers/net/ibmveth.c linux-2.6.32.42/drivers/net/ibmveth.c
32069 --- linux-2.6.32.42/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
32070 +++ linux-2.6.32.42/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
32071 @@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
32072 NULL,
32073 };
32074
32075 -static struct sysfs_ops veth_pool_ops = {
32076 +static const struct sysfs_ops veth_pool_ops = {
32077 .show = veth_pool_show,
32078 .store = veth_pool_store,
32079 };
32080 diff -urNp linux-2.6.32.42/drivers/net/igb/e1000_82575.c linux-2.6.32.42/drivers/net/igb/e1000_82575.c
32081 --- linux-2.6.32.42/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
32082 +++ linux-2.6.32.42/drivers/net/igb/e1000_82575.c 2011-04-17 15:56:46.000000000 -0400
32083 @@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
32084 wr32(E1000_VT_CTL, vt_ctl);
32085 }
32086
32087 -static struct e1000_mac_operations e1000_mac_ops_82575 = {
32088 +static const struct e1000_mac_operations e1000_mac_ops_82575 = {
32089 .reset_hw = igb_reset_hw_82575,
32090 .init_hw = igb_init_hw_82575,
32091 .check_for_link = igb_check_for_link_82575,
32092 @@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
32093 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
32094 };
32095
32096 -static struct e1000_phy_operations e1000_phy_ops_82575 = {
32097 +static const struct e1000_phy_operations e1000_phy_ops_82575 = {
32098 .acquire = igb_acquire_phy_82575,
32099 .get_cfg_done = igb_get_cfg_done_82575,
32100 .release = igb_release_phy_82575,
32101 };
32102
32103 -static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
32104 +static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
32105 .acquire = igb_acquire_nvm_82575,
32106 .read = igb_read_nvm_eerd,
32107 .release = igb_release_nvm_82575,
32108 diff -urNp linux-2.6.32.42/drivers/net/igb/e1000_hw.h linux-2.6.32.42/drivers/net/igb/e1000_hw.h
32109 --- linux-2.6.32.42/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
32110 +++ linux-2.6.32.42/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
32111 @@ -305,17 +305,17 @@ struct e1000_phy_operations {
32112 };
32113
32114 struct e1000_nvm_operations {
32115 - s32 (*acquire)(struct e1000_hw *);
32116 - s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
32117 - void (*release)(struct e1000_hw *);
32118 - s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32119 + s32 (* const acquire)(struct e1000_hw *);
32120 + s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
32121 + void (* const release)(struct e1000_hw *);
32122 + s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
32123 };
32124
32125 struct e1000_info {
32126 s32 (*get_invariants)(struct e1000_hw *);
32127 - struct e1000_mac_operations *mac_ops;
32128 - struct e1000_phy_operations *phy_ops;
32129 - struct e1000_nvm_operations *nvm_ops;
32130 + const struct e1000_mac_operations *mac_ops;
32131 + const struct e1000_phy_operations *phy_ops;
32132 + const struct e1000_nvm_operations *nvm_ops;
32133 };
32134
32135 extern const struct e1000_info e1000_82575_info;
32136 diff -urNp linux-2.6.32.42/drivers/net/iseries_veth.c linux-2.6.32.42/drivers/net/iseries_veth.c
32137 --- linux-2.6.32.42/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
32138 +++ linux-2.6.32.42/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
32139 @@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
32140 NULL
32141 };
32142
32143 -static struct sysfs_ops veth_cnx_sysfs_ops = {
32144 +static const struct sysfs_ops veth_cnx_sysfs_ops = {
32145 .show = veth_cnx_attribute_show
32146 };
32147
32148 @@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
32149 NULL
32150 };
32151
32152 -static struct sysfs_ops veth_port_sysfs_ops = {
32153 +static const struct sysfs_ops veth_port_sysfs_ops = {
32154 .show = veth_port_attribute_show
32155 };
32156
32157 diff -urNp linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c
32158 --- linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
32159 +++ linux-2.6.32.42/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
32160 @@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
32161 u32 rctl;
32162 int i;
32163
32164 + pax_track_stack();
32165 +
32166 /* Check for Promiscuous and All Multicast modes */
32167
32168 rctl = IXGB_READ_REG(hw, RCTL);
32169 diff -urNp linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c
32170 --- linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
32171 +++ linux-2.6.32.42/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
32172 @@ -260,6 +260,9 @@ void __devinit
32173 ixgb_check_options(struct ixgb_adapter *adapter)
32174 {
32175 int bd = adapter->bd_number;
32176 +
32177 + pax_track_stack();
32178 +
32179 if (bd >= IXGB_MAX_NIC) {
32180 printk(KERN_NOTICE
32181 "Warning: no configuration for board #%i\n", bd);
32182 diff -urNp linux-2.6.32.42/drivers/net/mlx4/main.c linux-2.6.32.42/drivers/net/mlx4/main.c
32183 --- linux-2.6.32.42/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
32184 +++ linux-2.6.32.42/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
32185 @@ -38,6 +38,7 @@
32186 #include <linux/errno.h>
32187 #include <linux/pci.h>
32188 #include <linux/dma-mapping.h>
32189 +#include <linux/sched.h>
32190
32191 #include <linux/mlx4/device.h>
32192 #include <linux/mlx4/doorbell.h>
32193 @@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
32194 u64 icm_size;
32195 int err;
32196
32197 + pax_track_stack();
32198 +
32199 err = mlx4_QUERY_FW(dev);
32200 if (err) {
32201 if (err == -EACCES)
32202 diff -urNp linux-2.6.32.42/drivers/net/niu.c linux-2.6.32.42/drivers/net/niu.c
32203 --- linux-2.6.32.42/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
32204 +++ linux-2.6.32.42/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
32205 @@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
32206 int i, num_irqs, err;
32207 u8 first_ldg;
32208
32209 + pax_track_stack();
32210 +
32211 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
32212 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
32213 ldg_num_map[i] = first_ldg + i;
32214 diff -urNp linux-2.6.32.42/drivers/net/pcnet32.c linux-2.6.32.42/drivers/net/pcnet32.c
32215 --- linux-2.6.32.42/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
32216 +++ linux-2.6.32.42/drivers/net/pcnet32.c 2011-04-17 15:56:46.000000000 -0400
32217 @@ -79,7 +79,7 @@ static int cards_found;
32218 /*
32219 * VLB I/O addresses
32220 */
32221 -static unsigned int pcnet32_portlist[] __initdata =
32222 +static unsigned int pcnet32_portlist[] __devinitdata =
32223 { 0x300, 0x320, 0x340, 0x360, 0 };
32224
32225 static int pcnet32_debug = 0;
32226 diff -urNp linux-2.6.32.42/drivers/net/tg3.h linux-2.6.32.42/drivers/net/tg3.h
32227 --- linux-2.6.32.42/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
32228 +++ linux-2.6.32.42/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
32229 @@ -95,6 +95,7 @@
32230 #define CHIPREV_ID_5750_A0 0x4000
32231 #define CHIPREV_ID_5750_A1 0x4001
32232 #define CHIPREV_ID_5750_A3 0x4003
32233 +#define CHIPREV_ID_5750_C1 0x4201
32234 #define CHIPREV_ID_5750_C2 0x4202
32235 #define CHIPREV_ID_5752_A0_HW 0x5000
32236 #define CHIPREV_ID_5752_A0 0x6000
32237 diff -urNp linux-2.6.32.42/drivers/net/tulip/de2104x.c linux-2.6.32.42/drivers/net/tulip/de2104x.c
32238 --- linux-2.6.32.42/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
32239 +++ linux-2.6.32.42/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
32240 @@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
32241 struct de_srom_info_leaf *il;
32242 void *bufp;
32243
32244 + pax_track_stack();
32245 +
32246 /* download entire eeprom */
32247 for (i = 0; i < DE_EEPROM_WORDS; i++)
32248 ((__le16 *)ee_data)[i] =
32249 diff -urNp linux-2.6.32.42/drivers/net/tulip/de4x5.c linux-2.6.32.42/drivers/net/tulip/de4x5.c
32250 --- linux-2.6.32.42/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
32251 +++ linux-2.6.32.42/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
32252 @@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
32253 for (i=0; i<ETH_ALEN; i++) {
32254 tmp.addr[i] = dev->dev_addr[i];
32255 }
32256 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32257 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32258 break;
32259
32260 case DE4X5_SET_HWADDR: /* Set the hardware address */
32261 @@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
32262 spin_lock_irqsave(&lp->lock, flags);
32263 memcpy(&statbuf, &lp->pktStats, ioc->len);
32264 spin_unlock_irqrestore(&lp->lock, flags);
32265 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
32266 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32267 return -EFAULT;
32268 break;
32269 }
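Both de4x5 ioctl hunks add the same guard: the caller-supplied ioc->len is checked against the size of the kernel-side buffer before copy_to_user(), so an oversized request now fails with -EFAULT instead of copying adjacent stack memory out to userspace. The parport procfs hunks further down apply the identical pattern. A standalone userspace sketch of the check, with memcpy standing in for copy_to_user and all names invented:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative only: copy at most sizeof(reply) bytes, never more. */
    static int answer_request(void *dst, size_t requested_len)
    {
        unsigned char reply[6] = { 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c };

        if (requested_len > sizeof(reply))   /* reject before copying */
            return -EFAULT;

        memcpy(dst, reply, requested_len);
        return 0;
    }

    int main(void)
    {
        unsigned char out[64];

        printf("%d\n", answer_request(out, 6));    /* 0: within bounds */
        printf("%d\n", answer_request(out, 64));   /* -EFAULT: would over-read */
        return 0;
    }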
32270 diff -urNp linux-2.6.32.42/drivers/net/usb/hso.c linux-2.6.32.42/drivers/net/usb/hso.c
32271 --- linux-2.6.32.42/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
32272 +++ linux-2.6.32.42/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
32273 @@ -71,7 +71,7 @@
32274 #include <asm/byteorder.h>
32275 #include <linux/serial_core.h>
32276 #include <linux/serial.h>
32277 -
32278 +#include <asm/local.h>
32279
32280 #define DRIVER_VERSION "1.2"
32281 #define MOD_AUTHOR "Option Wireless"
32282 @@ -258,7 +258,7 @@ struct hso_serial {
32283
32284 /* from usb_serial_port */
32285 struct tty_struct *tty;
32286 - int open_count;
32287 + local_t open_count;
32288 spinlock_t serial_lock;
32289
32290 int (*write_data) (struct hso_serial *serial);
32291 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
32292 struct urb *urb;
32293
32294 urb = serial->rx_urb[0];
32295 - if (serial->open_count > 0) {
32296 + if (local_read(&serial->open_count) > 0) {
32297 count = put_rxbuf_data(urb, serial);
32298 if (count == -1)
32299 return;
32300 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
32301 DUMP1(urb->transfer_buffer, urb->actual_length);
32302
32303 /* Anyone listening? */
32304 - if (serial->open_count == 0)
32305 + if (local_read(&serial->open_count) == 0)
32306 return;
32307
32308 if (status == 0) {
32309 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
32310 spin_unlock_irq(&serial->serial_lock);
32311
32312 /* check for port already opened, if not set the termios */
32313 - serial->open_count++;
32314 - if (serial->open_count == 1) {
32315 + if (local_inc_return(&serial->open_count) == 1) {
32316 tty->low_latency = 1;
32317 serial->rx_state = RX_IDLE;
32318 /* Force default termio settings */
32319 @@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
32320 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
32321 if (result) {
32322 hso_stop_serial_device(serial->parent);
32323 - serial->open_count--;
32324 + local_dec(&serial->open_count);
32325 kref_put(&serial->parent->ref, hso_serial_ref_free);
32326 }
32327 } else {
32328 @@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
32329
32330 /* reset the rts and dtr */
32331 /* do the actual close */
32332 - serial->open_count--;
32333 + local_dec(&serial->open_count);
32334
32335 - if (serial->open_count <= 0) {
32336 - serial->open_count = 0;
32337 + if (local_read(&serial->open_count) <= 0) {
32338 + local_set(&serial->open_count, 0);
32339 spin_lock_irq(&serial->serial_lock);
32340 if (serial->tty == tty) {
32341 serial->tty->driver_data = NULL;
32342 @@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
32343
32344 /* the actual setup */
32345 spin_lock_irqsave(&serial->serial_lock, flags);
32346 - if (serial->open_count)
32347 + if (local_read(&serial->open_count))
32348 _hso_serial_set_termios(tty, old);
32349 else
32350 tty->termios = old;
32351 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
32352 /* Start all serial ports */
32353 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
32354 if (serial_table[i] && (serial_table[i]->interface == iface)) {
32355 - if (dev2ser(serial_table[i])->open_count) {
32356 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
32357 result =
32358 hso_start_serial_device(serial_table[i], GFP_NOIO);
32359 hso_kick_transmit(dev2ser(serial_table[i]));
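The hso.c hunks replace the serial port's plain int open_count with a local_t manipulated through local_inc_return(), local_dec(), local_read() and local_set(), so the open/close bookkeeping becomes an atomic read-modify-write rather than a racy ++/-- on a shared integer; the first-open and last-close decisions key off the returned value. A rough userspace analogue of that counting scheme using C11 atomics (names invented):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int open_count;

    /* Only the opener that takes the count from 0 to 1 does the setup. */
    static void port_open(void)
    {
        if (atomic_fetch_add(&open_count, 1) + 1 == 1)
            puts("first open: initialise hardware");
    }

    /* Only the closer that brings the count back to 0 tears things down. */
    static void port_close(void)
    {
        if (atomic_fetch_sub(&open_count, 1) - 1 <= 0)
            puts("last close: shut hardware down");
    }

    int main(void)
    {
        port_open();
        port_open();
        port_close();
        port_close();
        return 0;
    }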
32360 diff -urNp linux-2.6.32.42/drivers/net/vxge/vxge-main.c linux-2.6.32.42/drivers/net/vxge/vxge-main.c
32361 --- linux-2.6.32.42/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
32362 +++ linux-2.6.32.42/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
32363 @@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
32364 struct sk_buff *completed[NR_SKB_COMPLETED];
32365 int more;
32366
32367 + pax_track_stack();
32368 +
32369 do {
32370 more = 0;
32371 skb_ptr = completed;
32372 @@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
32373 u8 mtable[256] = {0}; /* CPU to vpath mapping */
32374 int index;
32375
32376 + pax_track_stack();
32377 +
32378 /*
32379 * Filling
32380 * - itable with bucket numbers
32381 diff -urNp linux-2.6.32.42/drivers/net/wan/cycx_x25.c linux-2.6.32.42/drivers/net/wan/cycx_x25.c
32382 --- linux-2.6.32.42/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
32383 +++ linux-2.6.32.42/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
32384 @@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
32385 unsigned char hex[1024],
32386 * phex = hex;
32387
32388 + pax_track_stack();
32389 +
32390 if (len >= (sizeof(hex) / 2))
32391 len = (sizeof(hex) / 2) - 1;
32392
32393 diff -urNp linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c
32394 --- linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
32395 +++ linux-2.6.32.42/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
32396 @@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
32397 int do_autopm = 1;
32398 DECLARE_COMPLETION_ONSTACK(notif_completion);
32399
32400 + pax_track_stack();
32401 +
32402 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
32403 i2400m, ack, ack_size);
32404 BUG_ON(_ack == i2400m->bm_ack_buf);
32405 diff -urNp linux-2.6.32.42/drivers/net/wireless/airo.c linux-2.6.32.42/drivers/net/wireless/airo.c
32406 --- linux-2.6.32.42/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
32407 +++ linux-2.6.32.42/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
32408 @@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
32409 BSSListElement * loop_net;
32410 BSSListElement * tmp_net;
32411
32412 + pax_track_stack();
32413 +
32414 /* Blow away current list of scan results */
32415 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
32416 list_move_tail (&loop_net->list, &ai->network_free_list);
32417 @@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
32418 WepKeyRid wkr;
32419 int rc;
32420
32421 + pax_track_stack();
32422 +
32423 memset( &mySsid, 0, sizeof( mySsid ) );
32424 kfree (ai->flash);
32425 ai->flash = NULL;
32426 @@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
32427 __le32 *vals = stats.vals;
32428 int len;
32429
32430 + pax_track_stack();
32431 +
32432 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32433 return -ENOMEM;
32434 data = (struct proc_data *)file->private_data;
32435 @@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
32436 /* If doLoseSync is not 1, we won't do a Lose Sync */
32437 int doLoseSync = -1;
32438
32439 + pax_track_stack();
32440 +
32441 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
32442 return -ENOMEM;
32443 data = (struct proc_data *)file->private_data;
32444 @@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
32445 int i;
32446 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
32447
32448 + pax_track_stack();
32449 +
32450 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
32451 if (!qual)
32452 return -ENOMEM;
32453 @@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
32454 CapabilityRid cap_rid;
32455 __le32 *vals = stats_rid.vals;
32456
32457 + pax_track_stack();
32458 +
32459 /* Get stats out of the card */
32460 clear_bit(JOB_WSTATS, &local->jobs);
32461 if (local->power.event) {
32462 diff -urNp linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c
32463 --- linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
32464 +++ linux-2.6.32.42/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
32465 @@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
32466 unsigned int v;
32467 u64 tsf;
32468
32469 + pax_track_stack();
32470 +
32471 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
32472 len += snprintf(buf+len, sizeof(buf)-len,
32473 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
32474 @@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
32475 unsigned int len = 0;
32476 unsigned int i;
32477
32478 + pax_track_stack();
32479 +
32480 len += snprintf(buf+len, sizeof(buf)-len,
32481 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
32482
32483 diff -urNp linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c
32484 --- linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
32485 +++ linux-2.6.32.42/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
32486 @@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
32487 char buf[512];
32488 unsigned int len = 0;
32489
32490 + pax_track_stack();
32491 +
32492 len += snprintf(buf + len, sizeof(buf) - len,
32493 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
32494 len += snprintf(buf + len, sizeof(buf) - len,
32495 @@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
32496 int i;
32497 u8 addr[ETH_ALEN];
32498
32499 + pax_track_stack();
32500 +
32501 len += snprintf(buf + len, sizeof(buf) - len,
32502 "primary: %s (%s chan=%d ht=%d)\n",
32503 wiphy_name(sc->pri_wiphy->hw->wiphy),
32504 diff -urNp linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c
32505 --- linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32506 +++ linux-2.6.32.42/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32507 @@ -43,7 +43,7 @@ static struct dentry *rootdir;
32508 struct b43_debugfs_fops {
32509 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
32510 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
32511 - struct file_operations fops;
32512 + const struct file_operations fops;
32513 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
32514 size_t file_struct_offset;
32515 };
32516 diff -urNp linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c
32517 --- linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32518 +++ linux-2.6.32.42/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32519 @@ -44,7 +44,7 @@ static struct dentry *rootdir;
32520 struct b43legacy_debugfs_fops {
32521 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
32522 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
32523 - struct file_operations fops;
32524 + const struct file_operations fops;
32525 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
32526 size_t file_struct_offset;
32527 /* Take wl->irq_lock before calling read/write? */
32528 diff -urNp linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c
32529 --- linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
32530 +++ linux-2.6.32.42/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
32531 @@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
32532 int err;
32533 DECLARE_SSID_BUF(ssid);
32534
32535 + pax_track_stack();
32536 +
32537 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
32538
32539 if (ssid_len)
32540 @@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
32541 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
32542 int err;
32543
32544 + pax_track_stack();
32545 +
32546 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
32547 idx, keylen, len);
32548
32549 diff -urNp linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c
32550 --- linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
32551 +++ linux-2.6.32.42/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
32552 @@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
32553 unsigned long flags;
32554 DECLARE_SSID_BUF(ssid);
32555
32556 + pax_track_stack();
32557 +
32558 LIBIPW_DEBUG_SCAN("'%s' (%pM"
32559 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
32560 print_ssid(ssid, info_element->data, info_element->len),
32561 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c
32562 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
32563 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
32564 @@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
32565 },
32566 };
32567
32568 -static struct iwl_ops iwl1000_ops = {
32569 +static const struct iwl_ops iwl1000_ops = {
32570 .ucode = &iwl5000_ucode,
32571 .lib = &iwl1000_lib,
32572 .hcmd = &iwl5000_hcmd,
32573 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c
32574 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
32575 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
32576 @@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
32577 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
32578 };
32579
32580 -static struct iwl_ops iwl3945_ops = {
32581 +static const struct iwl_ops iwl3945_ops = {
32582 .ucode = &iwl3945_ucode,
32583 .lib = &iwl3945_lib,
32584 .hcmd = &iwl3945_hcmd,
32585 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c
32586 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
32587 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
32588 @@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
32589 },
32590 };
32591
32592 -static struct iwl_ops iwl4965_ops = {
32593 +static const struct iwl_ops iwl4965_ops = {
32594 .ucode = &iwl4965_ucode,
32595 .lib = &iwl4965_lib,
32596 .hcmd = &iwl4965_hcmd,
32597 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c
32598 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
32599 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
32600 @@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
32601 },
32602 };
32603
32604 -struct iwl_ops iwl5000_ops = {
32605 +const struct iwl_ops iwl5000_ops = {
32606 .ucode = &iwl5000_ucode,
32607 .lib = &iwl5000_lib,
32608 .hcmd = &iwl5000_hcmd,
32609 .utils = &iwl5000_hcmd_utils,
32610 };
32611
32612 -static struct iwl_ops iwl5150_ops = {
32613 +static const struct iwl_ops iwl5150_ops = {
32614 .ucode = &iwl5000_ucode,
32615 .lib = &iwl5150_lib,
32616 .hcmd = &iwl5000_hcmd,
32617 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c
32618 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
32619 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
32620 @@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
32621 .calc_rssi = iwl5000_calc_rssi,
32622 };
32623
32624 -static struct iwl_ops iwl6000_ops = {
32625 +static const struct iwl_ops iwl6000_ops = {
32626 .ucode = &iwl5000_ucode,
32627 .lib = &iwl6000_lib,
32628 .hcmd = &iwl5000_hcmd,
32629 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
32630 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
32631 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
32632 @@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
32633 u8 active_index = 0;
32634 s32 tpt = 0;
32635
32636 + pax_track_stack();
32637 +
32638 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
32639
32640 if (!ieee80211_is_data(hdr->frame_control) ||
32641 @@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
32642 u8 valid_tx_ant = 0;
32643 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
32644
32645 + pax_track_stack();
32646 +
32647 /* Override starting rate (index 0) if needed for debug purposes */
32648 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
32649
32650 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c
32651 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
32652 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
32653 @@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
32654 int pos = 0;
32655 const size_t bufsz = sizeof(buf);
32656
32657 + pax_track_stack();
32658 +
32659 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
32660 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
32661 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
32662 @@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
32663 const size_t bufsz = sizeof(buf);
32664 ssize_t ret;
32665
32666 + pax_track_stack();
32667 +
32668 for (i = 0; i < AC_NUM; i++) {
32669 pos += scnprintf(buf + pos, bufsz - pos,
32670 "\tcw_min\tcw_max\taifsn\ttxop\n");
32671 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h
32672 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
32673 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
32674 @@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
32675 #endif
32676
32677 #else
32678 -#define IWL_DEBUG(__priv, level, fmt, args...)
32679 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
32680 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
32681 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
32682 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
32683 void *p, u32 len)
32684 {}
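When debugging is compiled out, the IWL_DEBUG macros here used to expand to nothing; the hunk turns them into do {} while (0) so the no-op still behaves like a single statement that requires its trailing semicolon. With an empty expansion, a call used as the body of an if collapses to a bare `;` and trips warnings such as -Wempty-body; the do/while form avoids that. A small illustration with an invented macro name:

    #include <stdio.h>

    /* Debugging compiled out: the do/while(0) form keeps the call site
     * behaving like a real statement instead of an empty one. */
    #define DBG(fmt, ...) do {} while (0)

    int main(void)
    {
        int err = 1;

        if (err)
            DBG("error %d\n", err);   /* compiles cleanly, does nothing */
        else
            puts("ok");

        return 0;
    }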
32685 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h
32686 --- linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
32687 +++ linux-2.6.32.42/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
32688 @@ -68,7 +68,7 @@ struct iwl_tx_queue;
32689
32690 /* shared structures from iwl-5000.c */
32691 extern struct iwl_mod_params iwl50_mod_params;
32692 -extern struct iwl_ops iwl5000_ops;
32693 +extern const struct iwl_ops iwl5000_ops;
32694 extern struct iwl_ucode_ops iwl5000_ucode;
32695 extern struct iwl_lib_ops iwl5000_lib;
32696 extern struct iwl_hcmd_ops iwl5000_hcmd;
32697 diff -urNp linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c
32698 --- linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32699 +++ linux-2.6.32.42/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
32700 @@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
32701 int buf_len = 512;
32702 size_t len = 0;
32703
32704 + pax_track_stack();
32705 +
32706 if (*ppos != 0)
32707 return 0;
32708 if (count < sizeof(buf))
32709 diff -urNp linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c
32710 --- linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
32711 +++ linux-2.6.32.42/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
32712 @@ -708,7 +708,7 @@ out_unlock:
32713 struct lbs_debugfs_files {
32714 const char *name;
32715 int perm;
32716 - struct file_operations fops;
32717 + const struct file_operations fops;
32718 };
32719
32720 static const struct lbs_debugfs_files debugfs_files[] = {
32721 diff -urNp linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c
32722 --- linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
32723 +++ linux-2.6.32.42/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
32724 @@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
32725
32726 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
32727
32728 - if (rts_threshold < 0 || rts_threshold > 2347)
32729 + if (rts_threshold > 2347)
32730 rts_threshold = 2347;
32731
32732 tmp = cpu_to_le32(rts_threshold);
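The rndis_wlan hunk drops the `rts_threshold < 0` half of the clamp, presumably because the value is an unsigned integer at this point, which makes the lower-bound test always false and leaves only the upper-bound clamp meaningful. A short illustration of the pitfall:

    #include <stdio.h>

    int main(void)
    {
        unsigned int rts_threshold = 4000;

        if (rts_threshold < 0)        /* always false for an unsigned value;
                                         most compilers warn about it */
            puts("unreachable");

        if (rts_threshold > 2347)     /* the only clamp that can ever fire */
            rts_threshold = 2347;

        printf("%u\n", rts_threshold);   /* 2347 */
        return 0;
    }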
32733 diff -urNp linux-2.6.32.42/drivers/oprofile/buffer_sync.c linux-2.6.32.42/drivers/oprofile/buffer_sync.c
32734 --- linux-2.6.32.42/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
32735 +++ linux-2.6.32.42/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
32736 @@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
32737 if (cookie == NO_COOKIE)
32738 offset = pc;
32739 if (cookie == INVALID_COOKIE) {
32740 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32741 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32742 offset = pc;
32743 }
32744 if (cookie != last_cookie) {
32745 @@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
32746 /* add userspace sample */
32747
32748 if (!mm) {
32749 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
32750 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
32751 return 0;
32752 }
32753
32754 cookie = lookup_dcookie(mm, s->eip, &offset);
32755
32756 if (cookie == INVALID_COOKIE) {
32757 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
32758 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
32759 return 0;
32760 }
32761
32762 @@ -561,7 +561,7 @@ void sync_buffer(int cpu)
32763 /* ignore backtraces if failed to add a sample */
32764 if (state == sb_bt_start) {
32765 state = sb_bt_ignore;
32766 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
32767 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
32768 }
32769 }
32770 release_mm(mm);
32771 diff -urNp linux-2.6.32.42/drivers/oprofile/event_buffer.c linux-2.6.32.42/drivers/oprofile/event_buffer.c
32772 --- linux-2.6.32.42/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
32773 +++ linux-2.6.32.42/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
32774 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
32775 }
32776
32777 if (buffer_pos == buffer_size) {
32778 - atomic_inc(&oprofile_stats.event_lost_overflow);
32779 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
32780 return;
32781 }
32782
32783 diff -urNp linux-2.6.32.42/drivers/oprofile/oprof.c linux-2.6.32.42/drivers/oprofile/oprof.c
32784 --- linux-2.6.32.42/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
32785 +++ linux-2.6.32.42/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
32786 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
32787 if (oprofile_ops.switch_events())
32788 return;
32789
32790 - atomic_inc(&oprofile_stats.multiplex_counter);
32791 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
32792 start_switch_worker();
32793 }
32794
32795 diff -urNp linux-2.6.32.42/drivers/oprofile/oprofilefs.c linux-2.6.32.42/drivers/oprofile/oprofilefs.c
32796 --- linux-2.6.32.42/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
32797 +++ linux-2.6.32.42/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
32798 @@ -187,7 +187,7 @@ static const struct file_operations atom
32799
32800
32801 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
32802 - char const *name, atomic_t *val)
32803 + char const *name, atomic_unchecked_t *val)
32804 {
32805 struct dentry *d = __oprofilefs_create_file(sb, root, name,
32806 &atomic_ro_fops, 0444);
32807 diff -urNp linux-2.6.32.42/drivers/oprofile/oprofile_stats.c linux-2.6.32.42/drivers/oprofile/oprofile_stats.c
32808 --- linux-2.6.32.42/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
32809 +++ linux-2.6.32.42/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
32810 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
32811 cpu_buf->sample_invalid_eip = 0;
32812 }
32813
32814 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
32815 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
32816 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
32817 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
32818 - atomic_set(&oprofile_stats.multiplex_counter, 0);
32819 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
32820 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
32821 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
32822 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
32823 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
32824 }
32825
32826
32827 diff -urNp linux-2.6.32.42/drivers/oprofile/oprofile_stats.h linux-2.6.32.42/drivers/oprofile/oprofile_stats.h
32828 --- linux-2.6.32.42/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
32829 +++ linux-2.6.32.42/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
32830 @@ -13,11 +13,11 @@
32831 #include <asm/atomic.h>
32832
32833 struct oprofile_stat_struct {
32834 - atomic_t sample_lost_no_mm;
32835 - atomic_t sample_lost_no_mapping;
32836 - atomic_t bt_lost_no_mapping;
32837 - atomic_t event_lost_overflow;
32838 - atomic_t multiplex_counter;
32839 + atomic_unchecked_t sample_lost_no_mm;
32840 + atomic_unchecked_t sample_lost_no_mapping;
32841 + atomic_unchecked_t bt_lost_no_mapping;
32842 + atomic_unchecked_t event_lost_overflow;
32843 + atomic_unchecked_t multiplex_counter;
32844 };
32845
32846 extern struct oprofile_stat_struct oprofile_stats;
32847 diff -urNp linux-2.6.32.42/drivers/parisc/pdc_stable.c linux-2.6.32.42/drivers/parisc/pdc_stable.c
32848 --- linux-2.6.32.42/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
32849 +++ linux-2.6.32.42/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
32850 @@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
32851 return ret;
32852 }
32853
32854 -static struct sysfs_ops pdcspath_attr_ops = {
32855 +static const struct sysfs_ops pdcspath_attr_ops = {
32856 .show = pdcspath_attr_show,
32857 .store = pdcspath_attr_store,
32858 };
32859 diff -urNp linux-2.6.32.42/drivers/parport/procfs.c linux-2.6.32.42/drivers/parport/procfs.c
32860 --- linux-2.6.32.42/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
32861 +++ linux-2.6.32.42/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
32862 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
32863
32864 *ppos += len;
32865
32866 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
32867 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
32868 }
32869
32870 #ifdef CONFIG_PARPORT_1284
32871 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
32872
32873 *ppos += len;
32874
32875 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
32876 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
32877 }
32878 #endif /* IEEE1284.3 support. */
32879
32880 diff -urNp linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c
32881 --- linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
32882 +++ linux-2.6.32.42/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
32883 @@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
32884 }
32885
32886
32887 -static struct acpi_dock_ops acpiphp_dock_ops = {
32888 +static const struct acpi_dock_ops acpiphp_dock_ops = {
32889 .handler = handle_hotplug_event_func,
32890 };
32891
32892 diff -urNp linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c
32893 --- linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
32894 +++ linux-2.6.32.42/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
32895 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
32896
32897 void compaq_nvram_init (void __iomem *rom_start)
32898 {
32899 +
32900 +#ifndef CONFIG_PAX_KERNEXEC
32901 if (rom_start) {
32902 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
32903 }
32904 +#endif
32905 +
32906 dbg("int15 entry = %p\n", compaq_int15_entry_point);
32907
32908 /* initialize our int15 lock */
32909 diff -urNp linux-2.6.32.42/drivers/pci/hotplug/fakephp.c linux-2.6.32.42/drivers/pci/hotplug/fakephp.c
32910 --- linux-2.6.32.42/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
32911 +++ linux-2.6.32.42/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
32912 @@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
32913 }
32914
32915 static struct kobj_type legacy_ktype = {
32916 - .sysfs_ops = &(struct sysfs_ops){
32917 + .sysfs_ops = &(const struct sysfs_ops){
32918 .store = legacy_store, .show = legacy_show
32919 },
32920 .release = &legacy_release,
32921 diff -urNp linux-2.6.32.42/drivers/pci/intel-iommu.c linux-2.6.32.42/drivers/pci/intel-iommu.c
32922 --- linux-2.6.32.42/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
32923 +++ linux-2.6.32.42/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
32924 @@ -2643,7 +2643,7 @@ error:
32925 return 0;
32926 }
32927
32928 -static dma_addr_t intel_map_page(struct device *dev, struct page *page,
32929 +dma_addr_t intel_map_page(struct device *dev, struct page *page,
32930 unsigned long offset, size_t size,
32931 enum dma_data_direction dir,
32932 struct dma_attrs *attrs)
32933 @@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
32934 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
32935 }
32936
32937 -static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
32938 +void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
32939 size_t size, enum dma_data_direction dir,
32940 struct dma_attrs *attrs)
32941 {
32942 @@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
32943 }
32944 }
32945
32946 -static void *intel_alloc_coherent(struct device *hwdev, size_t size,
32947 +void *intel_alloc_coherent(struct device *hwdev, size_t size,
32948 dma_addr_t *dma_handle, gfp_t flags)
32949 {
32950 void *vaddr;
32951 @@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
32952 return NULL;
32953 }
32954
32955 -static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
32956 +void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
32957 dma_addr_t dma_handle)
32958 {
32959 int order;
32960 @@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
32961 free_pages((unsigned long)vaddr, order);
32962 }
32963
32964 -static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
32965 +void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
32966 int nelems, enum dma_data_direction dir,
32967 struct dma_attrs *attrs)
32968 {
32969 @@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
32970 return nelems;
32971 }
32972
32973 -static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
32974 +int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
32975 enum dma_data_direction dir, struct dma_attrs *attrs)
32976 {
32977 int i;
32978 @@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
32979 return nelems;
32980 }
32981
32982 -static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
32983 +int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
32984 {
32985 return !dma_addr;
32986 }
32987
32988 -struct dma_map_ops intel_dma_ops = {
32989 +const struct dma_map_ops intel_dma_ops = {
32990 .alloc_coherent = intel_alloc_coherent,
32991 .free_coherent = intel_free_coherent,
32992 .map_sg = intel_map_sg,
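
Besides dropping the static qualifiers on the map/unmap helpers, the hunk above marks intel_dma_ops as const, so the table of function pointers lands in read-only data and cannot be retargeted at runtime. A small self-contained sketch of the constified-ops-table pattern (types and names are illustrative):

#include <stdio.h>

/* Once the operations struct is const, its function pointers live in
 * .rodata and any attempt to overwrite them is rejected at compile time. */
struct demo_ops {
    int (*map)(int v);
};

static int demo_map(int v)
{
    return v + 1;
}

static const struct demo_ops demo_dma_ops = {
    .map = demo_map,
};

int main(void)
{
    /* demo_dma_ops.map = some_other_fn;  -- would no longer compile */
    printf("mapped: %d\n", demo_dma_ops.map(41));
    return 0;
}
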
32993 diff -urNp linux-2.6.32.42/drivers/pci/pcie/aspm.c linux-2.6.32.42/drivers/pci/pcie/aspm.c
32994 --- linux-2.6.32.42/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
32995 +++ linux-2.6.32.42/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
32996 @@ -27,9 +27,9 @@
32997 #define MODULE_PARAM_PREFIX "pcie_aspm."
32998
32999 /* Note: those are not register definitions */
33000 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33001 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33002 -#define ASPM_STATE_L1 (4) /* L1 state */
33003 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33004 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33005 +#define ASPM_STATE_L1 (4U) /* L1 state */
33006 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33007 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33008
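
The U suffixes above keep the ASPM state flags unsigned, presumably so that combining, masking, and inverting them never drops into signed arithmetic when the results are stored in unsigned fields. A compilable sketch of the difference (macro names are stand-ins, not the driver's own):

#include <stdio.h>

/* With plain int constants, ~FLAG is a negative int that has to be
 * converted when assigned to an unsigned field; with the U suffix the
 * whole expression stays in unsigned arithmetic. */
#define STATE_L0S_UP (1U)
#define STATE_L0S_DW (2U)
#define STATE_L1     (4U)
#define STATE_ALL    (STATE_L0S_UP | STATE_L0S_DW | STATE_L1)

int main(void)
{
    unsigned int state = STATE_ALL;

    state &= ~STATE_L1;                  /* clear one flag, all unsigned */
    printf("remaining flags: 0x%x\n", state);
    return 0;
}
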
33009 diff -urNp linux-2.6.32.42/drivers/pci/probe.c linux-2.6.32.42/drivers/pci/probe.c
33010 --- linux-2.6.32.42/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
33011 +++ linux-2.6.32.42/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
33012 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
33013 return ret;
33014 }
33015
33016 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
33017 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
33018 struct device_attribute *attr,
33019 char *buf)
33020 {
33021 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
33022 }
33023
33024 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
33025 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
33026 struct device_attribute *attr,
33027 char *buf)
33028 {
33029 diff -urNp linux-2.6.32.42/drivers/pci/proc.c linux-2.6.32.42/drivers/pci/proc.c
33030 --- linux-2.6.32.42/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
33031 +++ linux-2.6.32.42/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
33032 @@ -480,7 +480,16 @@ static const struct file_operations proc
33033 static int __init pci_proc_init(void)
33034 {
33035 struct pci_dev *dev = NULL;
33036 +
33037 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
33038 +#ifdef CONFIG_GRKERNSEC_PROC_USER
33039 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33040 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33041 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33042 +#endif
33043 +#else
33044 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33045 +#endif
33046 proc_create("devices", 0, proc_bus_pci_dir,
33047 &proc_bus_pci_dev_operations);
33048 proc_initialized = 1;
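
Under the GRKERNSEC_PROC options the bus/pci directory is created with a restrictive mode up front instead of the default world-readable one. A loose userspace analogue using an ordinary directory (the path below is purely illustrative):

#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>

/* Create the directory owner-accessible only (0700), roughly what
 * proc_mkdir_mode(..., S_IRUSR | S_IXUSR, ...) does for procfs. */
int main(void)
{
    if (mkdir("/tmp/demo-bus-pci", S_IRUSR | S_IWUSR | S_IXUSR) != 0) {
        perror("mkdir");
        return 1;
    }
    printf("created /tmp/demo-bus-pci with mode 0700\n");
    return 0;
}
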
33049 diff -urNp linux-2.6.32.42/drivers/pci/slot.c linux-2.6.32.42/drivers/pci/slot.c
33050 --- linux-2.6.32.42/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
33051 +++ linux-2.6.32.42/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
33052 @@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
33053 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
33054 }
33055
33056 -static struct sysfs_ops pci_slot_sysfs_ops = {
33057 +static const struct sysfs_ops pci_slot_sysfs_ops = {
33058 .show = pci_slot_attr_show,
33059 .store = pci_slot_attr_store,
33060 };
33061 diff -urNp linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c
33062 --- linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
33063 +++ linux-2.6.32.42/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
33064 @@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
33065 return -EFAULT;
33066 }
33067 }
33068 - buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
33069 + buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
33070 if (!buf)
33071 return -ENOMEM;
33072
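
Switching the ioctl argument buffer from kmalloc() to kzalloc() guarantees that any bytes the handler never writes are returned to userspace as zeroes rather than stale heap contents. The userspace equivalent is calloc() versus malloc(); a minimal sketch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    size_t sz = 64;
    unsigned char *buf = calloc(1, sz);   /* zeroed, like kzalloc() */
    if (!buf)
        return 1;

    memcpy(buf, "hdr", 3);                /* only part of it is filled in */

    /* Everything past the bytes we wrote reads back as zero, so no old
     * heap data can leak through untouched fields. */
    printf("trailing byte: %u\n", buf[sz - 1]);
    free(buf);
    return 0;
}
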
33073 diff -urNp linux-2.6.32.42/drivers/platform/x86/acer-wmi.c linux-2.6.32.42/drivers/platform/x86/acer-wmi.c
33074 --- linux-2.6.32.42/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
33075 +++ linux-2.6.32.42/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
33076 @@ -918,7 +918,7 @@ static int update_bl_status(struct backl
33077 return 0;
33078 }
33079
33080 -static struct backlight_ops acer_bl_ops = {
33081 +static const struct backlight_ops acer_bl_ops = {
33082 .get_brightness = read_brightness,
33083 .update_status = update_bl_status,
33084 };
33085 diff -urNp linux-2.6.32.42/drivers/platform/x86/asus_acpi.c linux-2.6.32.42/drivers/platform/x86/asus_acpi.c
33086 --- linux-2.6.32.42/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
33087 +++ linux-2.6.32.42/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
33088 @@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
33089 return 0;
33090 }
33091
33092 -static struct backlight_ops asus_backlight_data = {
33093 +static const struct backlight_ops asus_backlight_data = {
33094 .get_brightness = read_brightness,
33095 .update_status = set_brightness_status,
33096 };
33097 diff -urNp linux-2.6.32.42/drivers/platform/x86/asus-laptop.c linux-2.6.32.42/drivers/platform/x86/asus-laptop.c
33098 --- linux-2.6.32.42/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
33099 +++ linux-2.6.32.42/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
33100 @@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
33101 */
33102 static int read_brightness(struct backlight_device *bd);
33103 static int update_bl_status(struct backlight_device *bd);
33104 -static struct backlight_ops asusbl_ops = {
33105 +static const struct backlight_ops asusbl_ops = {
33106 .get_brightness = read_brightness,
33107 .update_status = update_bl_status,
33108 };
33109 diff -urNp linux-2.6.32.42/drivers/platform/x86/compal-laptop.c linux-2.6.32.42/drivers/platform/x86/compal-laptop.c
33110 --- linux-2.6.32.42/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
33111 +++ linux-2.6.32.42/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
33112 @@ -163,7 +163,7 @@ static int bl_update_status(struct backl
33113 return set_lcd_level(b->props.brightness);
33114 }
33115
33116 -static struct backlight_ops compalbl_ops = {
33117 +static const struct backlight_ops compalbl_ops = {
33118 .get_brightness = bl_get_brightness,
33119 .update_status = bl_update_status,
33120 };
33121 diff -urNp linux-2.6.32.42/drivers/platform/x86/dell-laptop.c linux-2.6.32.42/drivers/platform/x86/dell-laptop.c
33122 --- linux-2.6.32.42/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
33123 +++ linux-2.6.32.42/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
33124 @@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
33125 return buffer.output[1];
33126 }
33127
33128 -static struct backlight_ops dell_ops = {
33129 +static const struct backlight_ops dell_ops = {
33130 .get_brightness = dell_get_intensity,
33131 .update_status = dell_send_intensity,
33132 };
33133 diff -urNp linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c
33134 --- linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
33135 +++ linux-2.6.32.42/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
33136 @@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
33137 */
33138 static int read_brightness(struct backlight_device *bd);
33139 static int update_bl_status(struct backlight_device *bd);
33140 -static struct backlight_ops eeepcbl_ops = {
33141 +static const struct backlight_ops eeepcbl_ops = {
33142 .get_brightness = read_brightness,
33143 .update_status = update_bl_status,
33144 };
33145 diff -urNp linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c
33146 --- linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
33147 +++ linux-2.6.32.42/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
33148 @@ -436,7 +436,7 @@ static int bl_update_status(struct backl
33149 return ret;
33150 }
33151
33152 -static struct backlight_ops fujitsubl_ops = {
33153 +static const struct backlight_ops fujitsubl_ops = {
33154 .get_brightness = bl_get_brightness,
33155 .update_status = bl_update_status,
33156 };
33157 diff -urNp linux-2.6.32.42/drivers/platform/x86/msi-laptop.c linux-2.6.32.42/drivers/platform/x86/msi-laptop.c
33158 --- linux-2.6.32.42/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
33159 +++ linux-2.6.32.42/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
33160 @@ -161,7 +161,7 @@ static int bl_update_status(struct backl
33161 return set_lcd_level(b->props.brightness);
33162 }
33163
33164 -static struct backlight_ops msibl_ops = {
33165 +static const struct backlight_ops msibl_ops = {
33166 .get_brightness = bl_get_brightness,
33167 .update_status = bl_update_status,
33168 };
33169 diff -urNp linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c
33170 --- linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
33171 +++ linux-2.6.32.42/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
33172 @@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
33173 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
33174 }
33175
33176 -static struct backlight_ops pcc_backlight_ops = {
33177 +static const struct backlight_ops pcc_backlight_ops = {
33178 .get_brightness = bl_get,
33179 .update_status = bl_set_status,
33180 };
33181 diff -urNp linux-2.6.32.42/drivers/platform/x86/sony-laptop.c linux-2.6.32.42/drivers/platform/x86/sony-laptop.c
33182 --- linux-2.6.32.42/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
33183 +++ linux-2.6.32.42/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
33184 @@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
33185 }
33186
33187 static struct backlight_device *sony_backlight_device;
33188 -static struct backlight_ops sony_backlight_ops = {
33189 +static const struct backlight_ops sony_backlight_ops = {
33190 .update_status = sony_backlight_update_status,
33191 .get_brightness = sony_backlight_get_brightness,
33192 };
33193 diff -urNp linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c
33194 --- linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
33195 +++ linux-2.6.32.42/drivers/platform/x86/thinkpad_acpi.c 2011-04-17 15:56:46.000000000 -0400
33196 @@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
33197 BACKLIGHT_UPDATE_HOTKEY);
33198 }
33199
33200 -static struct backlight_ops ibm_backlight_data = {
33201 +static const struct backlight_ops ibm_backlight_data = {
33202 .get_brightness = brightness_get,
33203 .update_status = brightness_update_status,
33204 };
33205 diff -urNp linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c
33206 --- linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
33207 +++ linux-2.6.32.42/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
33208 @@ -671,7 +671,7 @@ static acpi_status remove_device(void)
33209 return AE_OK;
33210 }
33211
33212 -static struct backlight_ops toshiba_backlight_data = {
33213 +static const struct backlight_ops toshiba_backlight_data = {
33214 .get_brightness = get_lcd,
33215 .update_status = set_lcd_status,
33216 };
33217 diff -urNp linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c
33218 --- linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
33219 +++ linux-2.6.32.42/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
33220 @@ -60,7 +60,7 @@ do { \
33221 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33222 } while(0)
33223
33224 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33225 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33226 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33227
33228 /*
33229 @@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
33230
33231 cpu = get_cpu();
33232 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33233 +
33234 + pax_open_kernel();
33235 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33236 + pax_close_kernel();
33237
33238 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33239 spin_lock_irqsave(&pnp_bios_lock, flags);
33240 @@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
33241 :"memory");
33242 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33243
33244 + pax_open_kernel();
33245 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33246 + pax_close_kernel();
33247 +
33248 put_cpu();
33249
33250 /* If we get here and this is set then the PnP BIOS faulted on us. */
33251 @@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
33252 return status;
33253 }
33254
33255 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
33256 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33257 {
33258 int i;
33259
33260 @@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
33261 pnp_bios_callpoint.offset = header->fields.pm16offset;
33262 pnp_bios_callpoint.segment = PNP_CS16;
33263
33264 + pax_open_kernel();
33265 +
33266 for_each_possible_cpu(i) {
33267 struct desc_struct *gdt = get_cpu_gdt_table(i);
33268 if (!gdt)
33269 @@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
33270 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33271 (unsigned long)__va(header->fields.pm16dseg));
33272 }
33273 +
33274 + pax_close_kernel();
33275 }
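
The pax_open_kernel()/pax_close_kernel() pairs above open a brief write window around updates to the otherwise read-only GDT under KERNEXEC. A userspace analogue of that open-write-close bracket, using mprotect() on an ordinary page (illustrative only, not how the kernel implements it):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t pagesz = (size_t)sysconf(_SC_PAGESIZE);
    unsigned char *table = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (table == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    memset(table, 0, pagesz);
    mprotect(table, pagesz, PROT_READ);               /* steady state: read-only   */

    mprotect(table, pagesz, PROT_READ | PROT_WRITE);  /* "pax_open_kernel()"       */
    table[0x40] = 0x93;                               /* the brief, deliberate edit */
    mprotect(table, pagesz, PROT_READ);               /* "pax_close_kernel()"      */

    printf("entry 0x40 = 0x%02x\n", table[0x40]);
    munmap(table, pagesz);
    return 0;
}
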
33276 diff -urNp linux-2.6.32.42/drivers/pnp/resource.c linux-2.6.32.42/drivers/pnp/resource.c
33277 --- linux-2.6.32.42/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
33278 +++ linux-2.6.32.42/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
33279 @@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
33280 return 1;
33281
33282 /* check if the resource is valid */
33283 - if (*irq < 0 || *irq > 15)
33284 + if (*irq > 15)
33285 return 0;
33286
33287 /* check if the resource is reserved */
33288 @@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
33289 return 1;
33290
33291 /* check if the resource is valid */
33292 - if (*dma < 0 || *dma == 4 || *dma > 7)
33293 + if (*dma == 4 || *dma > 7)
33294 return 0;
33295
33296 /* check if the resource is reserved */
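
Both hunks drop a comparison that can never be true: *irq and *dma are unsigned, so the "< 0" half of the old range check was dead code (and a source of compiler warnings). A trivial illustration:

#include <stdio.h>

/* With an unsigned parameter the lower-bound test is meaningless;
 * only the upper bound actually constrains the value. */
static int irq_is_valid(unsigned int irq)
{
    return irq <= 15;        /* "irq < 0" would always be false here */
}

int main(void)
{
    printf("irq 7:  %d\n", irq_is_valid(7));
    printf("irq 42: %d\n", irq_is_valid(42));
    return 0;
}
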
33297 diff -urNp linux-2.6.32.42/drivers/rtc/rtc-dev.c linux-2.6.32.42/drivers/rtc/rtc-dev.c
33298 --- linux-2.6.32.42/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
33299 +++ linux-2.6.32.42/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
33300 @@ -14,6 +14,7 @@
33301 #include <linux/module.h>
33302 #include <linux/rtc.h>
33303 #include <linux/sched.h>
33304 +#include <linux/grsecurity.h>
33305 #include "rtc-core.h"
33306
33307 static dev_t rtc_devt;
33308 @@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
33309 if (copy_from_user(&tm, uarg, sizeof(tm)))
33310 return -EFAULT;
33311
33312 + gr_log_timechange();
33313 +
33314 return rtc_set_time(rtc, &tm);
33315
33316 case RTC_PIE_ON:
33317 diff -urNp linux-2.6.32.42/drivers/s390/cio/qdio_perf.c linux-2.6.32.42/drivers/s390/cio/qdio_perf.c
33318 --- linux-2.6.32.42/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
33319 +++ linux-2.6.32.42/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
33320 @@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
33321 static int qdio_perf_proc_show(struct seq_file *m, void *v)
33322 {
33323 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
33324 - (long)atomic_long_read(&perf_stats.qdio_int));
33325 + (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
33326 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
33327 - (long)atomic_long_read(&perf_stats.pci_int));
33328 + (long)atomic_long_read_unchecked(&perf_stats.pci_int));
33329 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
33330 - (long)atomic_long_read(&perf_stats.thin_int));
33331 + (long)atomic_long_read_unchecked(&perf_stats.thin_int));
33332 seq_printf(m, "\n");
33333 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
33334 - (long)atomic_long_read(&perf_stats.tasklet_inbound));
33335 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
33336 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
33337 - (long)atomic_long_read(&perf_stats.tasklet_outbound));
33338 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
33339 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
33340 - (long)atomic_long_read(&perf_stats.tasklet_thinint),
33341 - (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
33342 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
33343 + (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
33344 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
33345 - (long)atomic_long_read(&perf_stats.thinint_inbound),
33346 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
33347 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
33348 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
33349 seq_printf(m, "\n");
33350 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
33351 - (long)atomic_long_read(&perf_stats.siga_in));
33352 + (long)atomic_long_read_unchecked(&perf_stats.siga_in));
33353 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
33354 - (long)atomic_long_read(&perf_stats.siga_out));
33355 + (long)atomic_long_read_unchecked(&perf_stats.siga_out));
33356 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
33357 - (long)atomic_long_read(&perf_stats.siga_sync));
33358 + (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
33359 seq_printf(m, "\n");
33360 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
33361 - (long)atomic_long_read(&perf_stats.inbound_handler));
33362 + (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
33363 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
33364 - (long)atomic_long_read(&perf_stats.outbound_handler));
33365 + (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
33366 seq_printf(m, "\n");
33367 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
33368 - (long)atomic_long_read(&perf_stats.fast_requeue));
33369 + (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
33370 seq_printf(m, "Number of outbound target full condition\t: %li\n",
33371 - (long)atomic_long_read(&perf_stats.outbound_target_full));
33372 + (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
33373 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
33374 - (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
33375 + (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
33376 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
33377 - (long)atomic_long_read(&perf_stats.debug_stop_polling));
33378 + (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
33379 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
33380 - (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
33381 + (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
33382 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
33383 - (long)atomic_long_read(&perf_stats.debug_eqbs_all),
33384 - (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
33385 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
33386 + (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
33387 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
33388 - (long)atomic_long_read(&perf_stats.debug_sqbs_all),
33389 - (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
33390 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
33391 + (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
33392 seq_printf(m, "\n");
33393 return 0;
33394 }
33395 diff -urNp linux-2.6.32.42/drivers/s390/cio/qdio_perf.h linux-2.6.32.42/drivers/s390/cio/qdio_perf.h
33396 --- linux-2.6.32.42/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
33397 +++ linux-2.6.32.42/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
33398 @@ -13,46 +13,46 @@
33399
33400 struct qdio_perf_stats {
33401 /* interrupt handler calls */
33402 - atomic_long_t qdio_int;
33403 - atomic_long_t pci_int;
33404 - atomic_long_t thin_int;
33405 + atomic_long_unchecked_t qdio_int;
33406 + atomic_long_unchecked_t pci_int;
33407 + atomic_long_unchecked_t thin_int;
33408
33409 /* tasklet runs */
33410 - atomic_long_t tasklet_inbound;
33411 - atomic_long_t tasklet_outbound;
33412 - atomic_long_t tasklet_thinint;
33413 - atomic_long_t tasklet_thinint_loop;
33414 - atomic_long_t thinint_inbound;
33415 - atomic_long_t thinint_inbound_loop;
33416 - atomic_long_t thinint_inbound_loop2;
33417 + atomic_long_unchecked_t tasklet_inbound;
33418 + atomic_long_unchecked_t tasklet_outbound;
33419 + atomic_long_unchecked_t tasklet_thinint;
33420 + atomic_long_unchecked_t tasklet_thinint_loop;
33421 + atomic_long_unchecked_t thinint_inbound;
33422 + atomic_long_unchecked_t thinint_inbound_loop;
33423 + atomic_long_unchecked_t thinint_inbound_loop2;
33424
33425 /* signal adapter calls */
33426 - atomic_long_t siga_out;
33427 - atomic_long_t siga_in;
33428 - atomic_long_t siga_sync;
33429 + atomic_long_unchecked_t siga_out;
33430 + atomic_long_unchecked_t siga_in;
33431 + atomic_long_unchecked_t siga_sync;
33432
33433 /* misc */
33434 - atomic_long_t inbound_handler;
33435 - atomic_long_t outbound_handler;
33436 - atomic_long_t fast_requeue;
33437 - atomic_long_t outbound_target_full;
33438 + atomic_long_unchecked_t inbound_handler;
33439 + atomic_long_unchecked_t outbound_handler;
33440 + atomic_long_unchecked_t fast_requeue;
33441 + atomic_long_unchecked_t outbound_target_full;
33442
33443 /* for debugging */
33444 - atomic_long_t debug_tl_out_timer;
33445 - atomic_long_t debug_stop_polling;
33446 - atomic_long_t debug_eqbs_all;
33447 - atomic_long_t debug_eqbs_incomplete;
33448 - atomic_long_t debug_sqbs_all;
33449 - atomic_long_t debug_sqbs_incomplete;
33450 + atomic_long_unchecked_t debug_tl_out_timer;
33451 + atomic_long_unchecked_t debug_stop_polling;
33452 + atomic_long_unchecked_t debug_eqbs_all;
33453 + atomic_long_unchecked_t debug_eqbs_incomplete;
33454 + atomic_long_unchecked_t debug_sqbs_all;
33455 + atomic_long_unchecked_t debug_sqbs_incomplete;
33456 };
33457
33458 extern struct qdio_perf_stats perf_stats;
33459 extern int qdio_performance_stats;
33460
33461 -static inline void qdio_perf_stat_inc(atomic_long_t *count)
33462 +static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
33463 {
33464 if (qdio_performance_stats)
33465 - atomic_long_inc(count);
33466 + atomic_long_inc_unchecked(count);
33467 }
33468
33469 int qdio_setup_perf_stats(void);
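
The qdio performance fields are pure statistics, so wrap-around is harmless; moving them to the atomic_long_unchecked_t variants opts them out of the overflow trapping that PaX REFCOUNT applies to ordinary atomics. A userspace sketch of such a relaxed event counter (C11 atomics; not kernel code, names invented):

#include <stdatomic.h>
#include <stdio.h>

/* A plain event counter: increments only, read occasionally, and a wrap
 * would merely make the displayed number wrong, not corrupt state. */
static atomic_long qdio_int_events = 0;

static void stat_inc(atomic_long *counter)
{
    atomic_fetch_add_explicit(counter, 1, memory_order_relaxed);
}

int main(void)
{
    for (int i = 0; i < 1000; i++)
        stat_inc(&qdio_int_events);
    printf("qdio interrupts counted: %ld\n", atomic_load(&qdio_int_events));
    return 0;
}
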
33470 diff -urNp linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c
33471 --- linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
33472 +++ linux-2.6.32.42/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
33473 @@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
33474 u32 actual_fibsize64, actual_fibsize = 0;
33475 int i;
33476
33477 + pax_track_stack();
33478
33479 if (dev->in_reset) {
33480 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
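
pax_track_stack() is inserted at the top of functions with unusually large stack frames, such as this SRB path, so the used depth of the kernel stack can be tracked and later sanitized. A loose userspace sketch of the watermark idea (assumes a downward-growing stack; all names are invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uintptr_t lowest_sp;   /* deepest stack address seen so far */

static void track_stack(void)
{
    char marker;
    uintptr_t sp = (uintptr_t)&marker;

    if (lowest_sp == 0 || sp < lowest_sp)
        lowest_sp = sp;
}

static void big_frame_function(void)
{
    char srb_buffer[512];     /* stand-in for the large on-stack SRB state */

    track_stack();
    memset(srb_buffer, 0, sizeof srb_buffer);
}

int main(void)
{
    char top;

    big_frame_function();
    printf("deepest point observed: %lu bytes below main's frame\n",
           (unsigned long)((uintptr_t)&top - lowest_sp));
    return 0;
}
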
33481 diff -urNp linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c
33482 --- linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
33483 +++ linux-2.6.32.42/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
33484 @@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
33485 flash_error_table[i].reason);
33486 }
33487
33488 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
33489 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
33490 asd_show_update_bios, asd_store_update_bios);
33491
33492 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
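
The attribute change above stops the BIOS-update sysfs file from being world-writable: S_IWUGO grants write to user, group, and other, while S_IWUSR limits it to the owner (root). The same mode bits apply to regular files; an illustrative userspace use (the path is made up):

#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
    /* 0644: readable by everyone, writable only by the owner --
     * the file-level equivalent of S_IRUGO | S_IWUSR. */
    int fd = open("/tmp/update_bios_demo", O_CREAT | O_WRONLY,
                  S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR);
    if (fd < 0) {
        perror("open");
        return 1;
    }
    printf("created with owner-only write permission\n");
    close(fd);
    return 0;
}
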
33493 diff -urNp linux-2.6.32.42/drivers/scsi/BusLogic.c linux-2.6.32.42/drivers/scsi/BusLogic.c
33494 --- linux-2.6.32.42/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
33495 +++ linux-2.6.32.42/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
33496 @@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
33497 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
33498 *PrototypeHostAdapter)
33499 {
33500 + pax_track_stack();
33501 +
33502 /*
33503 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
33504 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
33505 diff -urNp linux-2.6.32.42/drivers/scsi/dpt_i2o.c linux-2.6.32.42/drivers/scsi/dpt_i2o.c
33506 --- linux-2.6.32.42/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
33507 +++ linux-2.6.32.42/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
33508 @@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
33509 dma_addr_t addr;
33510 ulong flags = 0;
33511
33512 + pax_track_stack();
33513 +
33514 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
33515 // get user msg size in u32s
33516 if(get_user(size, &user_msg[0])){
33517 @@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
33518 s32 rcode;
33519 dma_addr_t addr;
33520
33521 + pax_track_stack();
33522 +
33523 memset(msg, 0 , sizeof(msg));
33524 len = scsi_bufflen(cmd);
33525 direction = 0x00000000;
33526 diff -urNp linux-2.6.32.42/drivers/scsi/eata.c linux-2.6.32.42/drivers/scsi/eata.c
33527 --- linux-2.6.32.42/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
33528 +++ linux-2.6.32.42/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
33529 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
33530 struct hostdata *ha;
33531 char name[16];
33532
33533 + pax_track_stack();
33534 +
33535 sprintf(name, "%s%d", driver_name, j);
33536
33537 if (!request_region(port_base, REGION_SIZE, driver_name)) {
33538 diff -urNp linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c
33539 --- linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
33540 +++ linux-2.6.32.42/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
33541 @@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
33542 size_t rlen;
33543 size_t dlen;
33544
33545 + pax_track_stack();
33546 +
33547 fiph = (struct fip_header *)skb->data;
33548 sub = fiph->fip_subcode;
33549 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
33550 diff -urNp linux-2.6.32.42/drivers/scsi/gdth.c linux-2.6.32.42/drivers/scsi/gdth.c
33551 --- linux-2.6.32.42/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
33552 +++ linux-2.6.32.42/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
33553 @@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
33554 ulong flags;
33555 gdth_ha_str *ha;
33556
33557 + pax_track_stack();
33558 +
33559 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
33560 return -EFAULT;
33561 ha = gdth_find_ha(ldrv.ionode);
33562 @@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
33563 gdth_ha_str *ha;
33564 int rval;
33565
33566 + pax_track_stack();
33567 +
33568 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
33569 res.number >= MAX_HDRIVES)
33570 return -EFAULT;
33571 @@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
33572 gdth_ha_str *ha;
33573 int rval;
33574
33575 + pax_track_stack();
33576 +
33577 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
33578 return -EFAULT;
33579 ha = gdth_find_ha(gen.ionode);
33580 @@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
33581 int i;
33582 gdth_cmd_str gdtcmd;
33583 char cmnd[MAX_COMMAND_SIZE];
33584 +
33585 + pax_track_stack();
33586 +
33587 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
33588
33589 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
33590 diff -urNp linux-2.6.32.42/drivers/scsi/gdth_proc.c linux-2.6.32.42/drivers/scsi/gdth_proc.c
33591 --- linux-2.6.32.42/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
33592 +++ linux-2.6.32.42/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
33593 @@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
33594 ulong64 paddr;
33595
33596 char cmnd[MAX_COMMAND_SIZE];
33597 +
33598 + pax_track_stack();
33599 +
33600 memset(cmnd, 0xff, 12);
33601 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
33602
33603 @@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
33604 gdth_hget_str *phg;
33605 char cmnd[MAX_COMMAND_SIZE];
33606
33607 + pax_track_stack();
33608 +
33609 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
33610 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
33611 if (!gdtcmd || !estr)
33612 diff -urNp linux-2.6.32.42/drivers/scsi/hosts.c linux-2.6.32.42/drivers/scsi/hosts.c
33613 --- linux-2.6.32.42/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
33614 +++ linux-2.6.32.42/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
33615 @@ -40,7 +40,7 @@
33616 #include "scsi_logging.h"
33617
33618
33619 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
33620 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
33621
33622
33623 static void scsi_host_cls_release(struct device *dev)
33624 @@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
33625 * subtract one because we increment first then return, but we need to
33626 * know what the next host number was before increment
33627 */
33628 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
33629 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
33630 shost->dma_channel = 0xff;
33631
33632 /* These three are default values which can be overridden */
33633 diff -urNp linux-2.6.32.42/drivers/scsi/ipr.c linux-2.6.32.42/drivers/scsi/ipr.c
33634 --- linux-2.6.32.42/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
33635 +++ linux-2.6.32.42/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
33636 @@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
33637 return true;
33638 }
33639
33640 -static struct ata_port_operations ipr_sata_ops = {
33641 +static const struct ata_port_operations ipr_sata_ops = {
33642 .phy_reset = ipr_ata_phy_reset,
33643 .hardreset = ipr_sata_reset,
33644 .post_internal_cmd = ipr_ata_post_internal,
33645 diff -urNp linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c
33646 --- linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
33647 +++ linux-2.6.32.42/drivers/scsi/libfc/fc_exch.c 2011-04-17 15:56:46.000000000 -0400
33648 @@ -86,12 +86,12 @@ struct fc_exch_mgr {
33649 * all together if not used XXX
33650 */
33651 struct {
33652 - atomic_t no_free_exch;
33653 - atomic_t no_free_exch_xid;
33654 - atomic_t xid_not_found;
33655 - atomic_t xid_busy;
33656 - atomic_t seq_not_found;
33657 - atomic_t non_bls_resp;
33658 + atomic_unchecked_t no_free_exch;
33659 + atomic_unchecked_t no_free_exch_xid;
33660 + atomic_unchecked_t xid_not_found;
33661 + atomic_unchecked_t xid_busy;
33662 + atomic_unchecked_t seq_not_found;
33663 + atomic_unchecked_t non_bls_resp;
33664 } stats;
33665 };
33666 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
33667 @@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
33668 /* allocate memory for exchange */
33669 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
33670 if (!ep) {
33671 - atomic_inc(&mp->stats.no_free_exch);
33672 + atomic_inc_unchecked(&mp->stats.no_free_exch);
33673 goto out;
33674 }
33675 memset(ep, 0, sizeof(*ep));
33676 @@ -557,7 +557,7 @@ out:
33677 return ep;
33678 err:
33679 spin_unlock_bh(&pool->lock);
33680 - atomic_inc(&mp->stats.no_free_exch_xid);
33681 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
33682 mempool_free(ep, mp->ep_pool);
33683 return NULL;
33684 }
33685 @@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33686 xid = ntohs(fh->fh_ox_id); /* we originated exch */
33687 ep = fc_exch_find(mp, xid);
33688 if (!ep) {
33689 - atomic_inc(&mp->stats.xid_not_found);
33690 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33691 reject = FC_RJT_OX_ID;
33692 goto out;
33693 }
33694 @@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33695 ep = fc_exch_find(mp, xid);
33696 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
33697 if (ep) {
33698 - atomic_inc(&mp->stats.xid_busy);
33699 + atomic_inc_unchecked(&mp->stats.xid_busy);
33700 reject = FC_RJT_RX_ID;
33701 goto rel;
33702 }
33703 @@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33704 }
33705 xid = ep->xid; /* get our XID */
33706 } else if (!ep) {
33707 - atomic_inc(&mp->stats.xid_not_found);
33708 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33709 reject = FC_RJT_RX_ID; /* XID not found */
33710 goto out;
33711 }
33712 @@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
33713 } else {
33714 sp = &ep->seq;
33715 if (sp->id != fh->fh_seq_id) {
33716 - atomic_inc(&mp->stats.seq_not_found);
33717 + atomic_inc_unchecked(&mp->stats.seq_not_found);
33718 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
33719 goto rel;
33720 }
33721 @@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
33722
33723 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
33724 if (!ep) {
33725 - atomic_inc(&mp->stats.xid_not_found);
33726 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33727 goto out;
33728 }
33729 if (ep->esb_stat & ESB_ST_COMPLETE) {
33730 - atomic_inc(&mp->stats.xid_not_found);
33731 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33732 goto out;
33733 }
33734 if (ep->rxid == FC_XID_UNKNOWN)
33735 ep->rxid = ntohs(fh->fh_rx_id);
33736 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
33737 - atomic_inc(&mp->stats.xid_not_found);
33738 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33739 goto rel;
33740 }
33741 if (ep->did != ntoh24(fh->fh_s_id) &&
33742 ep->did != FC_FID_FLOGI) {
33743 - atomic_inc(&mp->stats.xid_not_found);
33744 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33745 goto rel;
33746 }
33747 sof = fr_sof(fp);
33748 @@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
33749 } else {
33750 sp = &ep->seq;
33751 if (sp->id != fh->fh_seq_id) {
33752 - atomic_inc(&mp->stats.seq_not_found);
33753 + atomic_inc_unchecked(&mp->stats.seq_not_found);
33754 goto rel;
33755 }
33756 }
33757 @@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
33758 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
33759
33760 if (!sp)
33761 - atomic_inc(&mp->stats.xid_not_found);
33762 + atomic_inc_unchecked(&mp->stats.xid_not_found);
33763 else
33764 - atomic_inc(&mp->stats.non_bls_resp);
33765 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
33766
33767 fc_frame_free(fp);
33768 }
33769 diff -urNp linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c
33770 --- linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
33771 +++ linux-2.6.32.42/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
33772 @@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
33773 }
33774 }
33775
33776 -static struct ata_port_operations sas_sata_ops = {
33777 +static const struct ata_port_operations sas_sata_ops = {
33778 .phy_reset = sas_ata_phy_reset,
33779 .post_internal_cmd = sas_ata_post_internal,
33780 .qc_defer = ata_std_qc_defer,
33781 diff -urNp linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c
33782 --- linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
33783 +++ linux-2.6.32.42/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
33784 @@ -124,7 +124,7 @@ struct lpfc_debug {
33785 int len;
33786 };
33787
33788 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33789 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
33790 static unsigned long lpfc_debugfs_start_time = 0L;
33791
33792 /**
33793 @@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
33794 lpfc_debugfs_enable = 0;
33795
33796 len = 0;
33797 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
33798 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
33799 (lpfc_debugfs_max_disc_trc - 1);
33800 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
33801 dtp = vport->disc_trc + i;
33802 @@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
33803 lpfc_debugfs_enable = 0;
33804
33805 len = 0;
33806 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
33807 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
33808 (lpfc_debugfs_max_slow_ring_trc - 1);
33809 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
33810 dtp = phba->slow_ring_trc + i;
33811 @@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
33812 uint32_t *ptr;
33813 char buffer[1024];
33814
33815 + pax_track_stack();
33816 +
33817 off = 0;
33818 spin_lock_irq(&phba->hbalock);
33819
33820 @@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
33821 !vport || !vport->disc_trc)
33822 return;
33823
33824 - index = atomic_inc_return(&vport->disc_trc_cnt) &
33825 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
33826 (lpfc_debugfs_max_disc_trc - 1);
33827 dtp = vport->disc_trc + index;
33828 dtp->fmt = fmt;
33829 dtp->data1 = data1;
33830 dtp->data2 = data2;
33831 dtp->data3 = data3;
33832 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33833 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33834 dtp->jif = jiffies;
33835 #endif
33836 return;
33837 @@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
33838 !phba || !phba->slow_ring_trc)
33839 return;
33840
33841 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
33842 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
33843 (lpfc_debugfs_max_slow_ring_trc - 1);
33844 dtp = phba->slow_ring_trc + index;
33845 dtp->fmt = fmt;
33846 dtp->data1 = data1;
33847 dtp->data2 = data2;
33848 dtp->data3 = data3;
33849 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
33850 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
33851 dtp->jif = jiffies;
33852 #endif
33853 return;
33854 @@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33855 "slow_ring buffer\n");
33856 goto debug_failed;
33857 }
33858 - atomic_set(&phba->slow_ring_trc_cnt, 0);
33859 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
33860 memset(phba->slow_ring_trc, 0,
33861 (sizeof(struct lpfc_debugfs_trc) *
33862 lpfc_debugfs_max_slow_ring_trc));
33863 @@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
33864 "buffer\n");
33865 goto debug_failed;
33866 }
33867 - atomic_set(&vport->disc_trc_cnt, 0);
33868 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
33869
33870 snprintf(name, sizeof(name), "discovery_trace");
33871 vport->debug_disc_trc =
33872 diff -urNp linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h
33873 --- linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
33874 +++ linux-2.6.32.42/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
33875 @@ -400,7 +400,7 @@ struct lpfc_vport {
33876 struct dentry *debug_nodelist;
33877 struct dentry *vport_debugfs_root;
33878 struct lpfc_debugfs_trc *disc_trc;
33879 - atomic_t disc_trc_cnt;
33880 + atomic_unchecked_t disc_trc_cnt;
33881 #endif
33882 uint8_t stat_data_enabled;
33883 uint8_t stat_data_blocked;
33884 @@ -725,8 +725,8 @@ struct lpfc_hba {
33885 struct timer_list fabric_block_timer;
33886 unsigned long bit_flags;
33887 #define FABRIC_COMANDS_BLOCKED 0
33888 - atomic_t num_rsrc_err;
33889 - atomic_t num_cmd_success;
33890 + atomic_unchecked_t num_rsrc_err;
33891 + atomic_unchecked_t num_cmd_success;
33892 unsigned long last_rsrc_error_time;
33893 unsigned long last_ramp_down_time;
33894 unsigned long last_ramp_up_time;
33895 @@ -740,7 +740,7 @@ struct lpfc_hba {
33896 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
33897 struct dentry *debug_slow_ring_trc;
33898 struct lpfc_debugfs_trc *slow_ring_trc;
33899 - atomic_t slow_ring_trc_cnt;
33900 + atomic_unchecked_t slow_ring_trc_cnt;
33901 #endif
33902
33903 /* Used for deferred freeing of ELS data buffers */
33904 diff -urNp linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c
33905 --- linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
33906 +++ linux-2.6.32.42/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
33907 @@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
33908 uint32_t evt_posted;
33909
33910 spin_lock_irqsave(&phba->hbalock, flags);
33911 - atomic_inc(&phba->num_rsrc_err);
33912 + atomic_inc_unchecked(&phba->num_rsrc_err);
33913 phba->last_rsrc_error_time = jiffies;
33914
33915 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
33916 @@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
33917 unsigned long flags;
33918 struct lpfc_hba *phba = vport->phba;
33919 uint32_t evt_posted;
33920 - atomic_inc(&phba->num_cmd_success);
33921 + atomic_inc_unchecked(&phba->num_cmd_success);
33922
33923 if (vport->cfg_lun_queue_depth <= queue_depth)
33924 return;
33925 @@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33926 int i;
33927 struct lpfc_rport_data *rdata;
33928
33929 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
33930 - num_cmd_success = atomic_read(&phba->num_cmd_success);
33931 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
33932 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
33933
33934 vports = lpfc_create_vport_work_array(phba);
33935 if (vports != NULL)
33936 @@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
33937 }
33938 }
33939 lpfc_destroy_vport_work_array(phba, vports);
33940 - atomic_set(&phba->num_rsrc_err, 0);
33941 - atomic_set(&phba->num_cmd_success, 0);
33942 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
33943 + atomic_set_unchecked(&phba->num_cmd_success, 0);
33944 }
33945
33946 /**
33947 @@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
33948 }
33949 }
33950 lpfc_destroy_vport_work_array(phba, vports);
33951 - atomic_set(&phba->num_rsrc_err, 0);
33952 - atomic_set(&phba->num_cmd_success, 0);
33953 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
33954 + atomic_set_unchecked(&phba->num_cmd_success, 0);
33955 }
33956
33957 /**
33958 diff -urNp linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c
33959 --- linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
33960 +++ linux-2.6.32.42/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
33961 @@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
33962 int rval;
33963 int i;
33964
33965 + pax_track_stack();
33966 +
33967 // Allocate memory for the base list of scb for management module.
33968 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
33969
33970 diff -urNp linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c
33971 --- linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
33972 +++ linux-2.6.32.42/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
33973 @@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
33974 int nelem = ARRAY_SIZE(get_attrs), a = 0;
33975 int ret;
33976
33977 + pax_track_stack();
33978 +
33979 or = osd_start_request(od, GFP_KERNEL);
33980 if (!or)
33981 return -ENOMEM;
33982 diff -urNp linux-2.6.32.42/drivers/scsi/pmcraid.c linux-2.6.32.42/drivers/scsi/pmcraid.c
33983 --- linux-2.6.32.42/drivers/scsi/pmcraid.c 2011-05-10 22:12:01.000000000 -0400
33984 +++ linux-2.6.32.42/drivers/scsi/pmcraid.c 2011-05-10 22:12:33.000000000 -0400
33985 @@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
33986 res->scsi_dev = scsi_dev;
33987 scsi_dev->hostdata = res;
33988 res->change_detected = 0;
33989 - atomic_set(&res->read_failures, 0);
33990 - atomic_set(&res->write_failures, 0);
33991 + atomic_set_unchecked(&res->read_failures, 0);
33992 + atomic_set_unchecked(&res->write_failures, 0);
33993 rc = 0;
33994 }
33995 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
33996 @@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
33997
33998 /* If this was a SCSI read/write command keep count of errors */
33999 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34000 - atomic_inc(&res->read_failures);
34001 + atomic_inc_unchecked(&res->read_failures);
34002 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34003 - atomic_inc(&res->write_failures);
34004 + atomic_inc_unchecked(&res->write_failures);
34005
34006 if (!RES_IS_GSCSI(res->cfg_entry) &&
34007 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34008 @@ -4113,7 +4113,7 @@ static void pmcraid_worker_function(stru
34009
34010 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34011 /* add resources only after host is added into system */
34012 - if (!atomic_read(&pinstance->expose_resources))
34013 + if (!atomic_read_unchecked(&pinstance->expose_resources))
34014 return;
34015
34016 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
34017 @@ -4847,7 +4847,7 @@ static int __devinit pmcraid_init_instan
34018 init_waitqueue_head(&pinstance->reset_wait_q);
34019
34020 atomic_set(&pinstance->outstanding_cmds, 0);
34021 - atomic_set(&pinstance->expose_resources, 0);
34022 + atomic_set_unchecked(&pinstance->expose_resources, 0);
34023
34024 INIT_LIST_HEAD(&pinstance->free_res_q);
34025 INIT_LIST_HEAD(&pinstance->used_res_q);
34026 @@ -5499,7 +5499,7 @@ static int __devinit pmcraid_probe(
34027 /* Schedule worker thread to handle CCN and take care of adding and
34028 * removing devices to OS
34029 */
34030 - atomic_set(&pinstance->expose_resources, 1);
34031 + atomic_set_unchecked(&pinstance->expose_resources, 1);
34032 schedule_work(&pinstance->worker_q);
34033 return rc;
34034
34035 diff -urNp linux-2.6.32.42/drivers/scsi/pmcraid.h linux-2.6.32.42/drivers/scsi/pmcraid.h
34036 --- linux-2.6.32.42/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
34037 +++ linux-2.6.32.42/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
34038 @@ -690,7 +690,7 @@ struct pmcraid_instance {
34039 atomic_t outstanding_cmds;
34040
34041 /* should add/delete resources to mid-layer now ?*/
34042 - atomic_t expose_resources;
34043 + atomic_unchecked_t expose_resources;
34044
34045 /* Tasklet to handle deferred processing */
34046 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
34047 @@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
34048 struct list_head queue; /* link to "to be exposed" resources */
34049 struct pmcraid_config_table_entry cfg_entry;
34050 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34051 - atomic_t read_failures; /* count of failed READ commands */
34052 - atomic_t write_failures; /* count of failed WRITE commands */
34053 + atomic_unchecked_t read_failures; /* count of failed READ commands */
34054 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34055
34056 /* To indicate add/delete/modify during CCN */
34057 u8 change_detected;
34058 diff -urNp linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h
34059 --- linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
34060 +++ linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
34061 @@ -240,7 +240,7 @@ struct ddb_entry {
34062 atomic_t retry_relogin_timer; /* Min Time between relogins
34063 * (4000 only) */
34064 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
34065 - atomic_t relogin_retry_count; /* Num of times relogin has been
34066 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34067 * retried */
34068
34069 uint16_t port;
34070 diff -urNp linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c
34071 --- linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
34072 +++ linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
34073 @@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
34074 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
34075 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34076 atomic_set(&ddb_entry->relogin_timer, 0);
34077 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34078 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34079 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34080 list_add_tail(&ddb_entry->list, &ha->ddb_list);
34081 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
34082 @@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
34083 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34084 atomic_set(&ddb_entry->port_down_timer,
34085 ha->port_down_retry_count);
34086 - atomic_set(&ddb_entry->relogin_retry_count, 0);
34087 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34088 atomic_set(&ddb_entry->relogin_timer, 0);
34089 clear_bit(DF_RELOGIN, &ddb_entry->flags);
34090 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
34091 diff -urNp linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c
34092 --- linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
34093 +++ linux-2.6.32.42/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
34094 @@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
34095 ddb_entry->fw_ddb_device_state ==
34096 DDB_DS_SESSION_FAILED) {
34097 /* Reset retry relogin timer */
34098 - atomic_inc(&ddb_entry->relogin_retry_count);
34099 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34100 DEBUG2(printk("scsi%ld: index[%d] relogin"
34101 " timed out-retrying"
34102 " relogin (%d)\n",
34103 ha->host_no,
34104 ddb_entry->fw_ddb_index,
34105 - atomic_read(&ddb_entry->
34106 + atomic_read_unchecked(&ddb_entry->
34107 relogin_retry_count))
34108 );
34109 start_dpc++;
34110 diff -urNp linux-2.6.32.42/drivers/scsi/scsi.c linux-2.6.32.42/drivers/scsi/scsi.c
34111 --- linux-2.6.32.42/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
34112 +++ linux-2.6.32.42/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
34113 @@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
34114 unsigned long timeout;
34115 int rtn = 0;
34116
34117 - atomic_inc(&cmd->device->iorequest_cnt);
34118 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34119
34120 /* check if the device is still usable */
34121 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34122 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_debug.c linux-2.6.32.42/drivers/scsi/scsi_debug.c
34123 --- linux-2.6.32.42/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
34124 +++ linux-2.6.32.42/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
34125 @@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
34126 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
34127 unsigned char *cmd = (unsigned char *)scp->cmnd;
34128
34129 + pax_track_stack();
34130 +
34131 if ((errsts = check_readiness(scp, 1, devip)))
34132 return errsts;
34133 memset(arr, 0, sizeof(arr));
34134 @@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
34135 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
34136 unsigned char *cmd = (unsigned char *)scp->cmnd;
34137
34138 + pax_track_stack();
34139 +
34140 if ((errsts = check_readiness(scp, 1, devip)))
34141 return errsts;
34142 memset(arr, 0, sizeof(arr));
34143 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_lib.c linux-2.6.32.42/drivers/scsi/scsi_lib.c
34144 --- linux-2.6.32.42/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
34145 +++ linux-2.6.32.42/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
34146 @@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
34147
34148 scsi_init_cmd_errh(cmd);
34149 cmd->result = DID_NO_CONNECT << 16;
34150 - atomic_inc(&cmd->device->iorequest_cnt);
34151 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34152
34153 /*
34154 * SCSI request completion path will do scsi_device_unbusy(),
34155 @@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
34156 */
34157 cmd->serial_number = 0;
34158
34159 - atomic_inc(&cmd->device->iodone_cnt);
34160 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
34161 if (cmd->result)
34162 - atomic_inc(&cmd->device->ioerr_cnt);
34163 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34164
34165 disposition = scsi_decide_disposition(cmd);
34166 if (disposition != SUCCESS &&
34167 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_sysfs.c linux-2.6.32.42/drivers/scsi/scsi_sysfs.c
34168 --- linux-2.6.32.42/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
34169 +++ linux-2.6.32.42/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
34170 @@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
34171 char *buf) \
34172 { \
34173 struct scsi_device *sdev = to_scsi_device(dev); \
34174 - unsigned long long count = atomic_read(&sdev->field); \
34175 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
34176 return snprintf(buf, 20, "0x%llx\n", count); \
34177 } \
34178 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34179 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c
34180 --- linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
34181 +++ linux-2.6.32.42/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
34182 @@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
34183 * Netlink Infrastructure
34184 */
34185
34186 -static atomic_t fc_event_seq;
34187 +static atomic_unchecked_t fc_event_seq;
34188
34189 /**
34190 * fc_get_event_number - Obtain the next sequential FC event number
34191 @@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
34192 u32
34193 fc_get_event_number(void)
34194 {
34195 - return atomic_add_return(1, &fc_event_seq);
34196 + return atomic_add_return_unchecked(1, &fc_event_seq);
34197 }
34198 EXPORT_SYMBOL(fc_get_event_number);
34199
34200 @@ -641,7 +641,7 @@ static __init int fc_transport_init(void
34201 {
34202 int error;
34203
34204 - atomic_set(&fc_event_seq, 0);
34205 + atomic_set_unchecked(&fc_event_seq, 0);
34206
34207 error = transport_class_register(&fc_host_class);
34208 if (error)
34209 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c
34210 --- linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
34211 +++ linux-2.6.32.42/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
34212 @@ -81,7 +81,7 @@ struct iscsi_internal {
34213 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34214 };
34215
34216 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34217 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34218 static struct workqueue_struct *iscsi_eh_timer_workq;
34219
34220 /*
34221 @@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
34222 int err;
34223
34224 ihost = shost->shost_data;
34225 - session->sid = atomic_add_return(1, &iscsi_session_nr);
34226 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34227
34228 if (id == ISCSI_MAX_TARGET) {
34229 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34230 @@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
34231 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34232 ISCSI_TRANSPORT_VERSION);
34233
34234 - atomic_set(&iscsi_session_nr, 0);
34235 + atomic_set_unchecked(&iscsi_session_nr, 0);
34236
34237 err = class_register(&iscsi_transport_class);
34238 if (err)
34239 diff -urNp linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c
34240 --- linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
34241 +++ linux-2.6.32.42/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
34242 @@ -33,7 +33,7 @@
34243 #include "scsi_transport_srp_internal.h"
34244
34245 struct srp_host_attrs {
34246 - atomic_t next_port_id;
34247 + atomic_unchecked_t next_port_id;
34248 };
34249 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34250
34251 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34252 struct Scsi_Host *shost = dev_to_shost(dev);
34253 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34254
34255 - atomic_set(&srp_host->next_port_id, 0);
34256 + atomic_set_unchecked(&srp_host->next_port_id, 0);
34257 return 0;
34258 }
34259
34260 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34261 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34262 rport->roles = ids->roles;
34263
34264 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34265 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34266 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34267
34268 transport_setup_device(&rport->dev);
34269 diff -urNp linux-2.6.32.42/drivers/scsi/sg.c linux-2.6.32.42/drivers/scsi/sg.c
34270 --- linux-2.6.32.42/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
34271 +++ linux-2.6.32.42/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
34272 @@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
34273 const struct file_operations * fops;
34274 };
34275
34276 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34277 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34278 {"allow_dio", &adio_fops},
34279 {"debug", &debug_fops},
34280 {"def_reserved_size", &dressz_fops},
34281 @@ -2307,7 +2307,7 @@ sg_proc_init(void)
34282 {
34283 int k, mask;
34284 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34285 - struct sg_proc_leaf * leaf;
34286 + const struct sg_proc_leaf * leaf;
34287
34288 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34289 if (!sg_proc_sgp)
34290 diff -urNp linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c
34291 --- linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
34292 +++ linux-2.6.32.42/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
34293 @@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
34294 int do_iounmap = 0;
34295 int do_disable_device = 1;
34296
34297 + pax_track_stack();
34298 +
34299 memset(&sym_dev, 0, sizeof(sym_dev));
34300 memset(&nvram, 0, sizeof(nvram));
34301 sym_dev.pdev = pdev;
34302 diff -urNp linux-2.6.32.42/drivers/serial/kgdboc.c linux-2.6.32.42/drivers/serial/kgdboc.c
34303 --- linux-2.6.32.42/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
34304 +++ linux-2.6.32.42/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
34305 @@ -18,7 +18,7 @@
34306
34307 #define MAX_CONFIG_LEN 40
34308
34309 -static struct kgdb_io kgdboc_io_ops;
34310 +static const struct kgdb_io kgdboc_io_ops;
34311
34312 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
34313 static int configured = -1;
34314 @@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
34315 module_put(THIS_MODULE);
34316 }
34317
34318 -static struct kgdb_io kgdboc_io_ops = {
34319 +static const struct kgdb_io kgdboc_io_ops = {
34320 .name = "kgdboc",
34321 .read_char = kgdboc_get_char,
34322 .write_char = kgdboc_put_char,
34323 diff -urNp linux-2.6.32.42/drivers/spi/spi.c linux-2.6.32.42/drivers/spi/spi.c
34324 --- linux-2.6.32.42/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
34325 +++ linux-2.6.32.42/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
34326 @@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
34327 EXPORT_SYMBOL_GPL(spi_sync);
34328
34329 /* portable code must never pass more than 32 bytes */
34330 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
34331 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
34332
34333 static u8 *buf;
34334
34335 diff -urNp linux-2.6.32.42/drivers/staging/android/binder.c linux-2.6.32.42/drivers/staging/android/binder.c
34336 --- linux-2.6.32.42/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
34337 +++ linux-2.6.32.42/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
34338 @@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
34339 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
34340 }
34341
34342 -static struct vm_operations_struct binder_vm_ops = {
34343 +static const struct vm_operations_struct binder_vm_ops = {
34344 .open = binder_vma_open,
34345 .close = binder_vma_close,
34346 };
34347 diff -urNp linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c
34348 --- linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
34349 +++ linux-2.6.32.42/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
34350 @@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
34351 return VM_FAULT_NOPAGE;
34352 }
34353
34354 -static struct vm_operations_struct b3dfg_vm_ops = {
34355 +static const struct vm_operations_struct b3dfg_vm_ops = {
34356 .fault = b3dfg_vma_fault,
34357 };
34358
34359 @@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
34360 return r;
34361 }
34362
34363 -static struct file_operations b3dfg_fops = {
34364 +static const struct file_operations b3dfg_fops = {
34365 .owner = THIS_MODULE,
34366 .open = b3dfg_open,
34367 .release = b3dfg_release,
34368 diff -urNp linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c
34369 --- linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c 2011-03-27 14:31:47.000000000 -0400
34370 +++ linux-2.6.32.42/drivers/staging/comedi/comedi_fops.c 2011-04-17 15:56:46.000000000 -0400
34371 @@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
34372 mutex_unlock(&dev->mutex);
34373 }
34374
34375 -static struct vm_operations_struct comedi_vm_ops = {
34376 +static const struct vm_operations_struct comedi_vm_ops = {
34377 .close = comedi_unmap,
34378 };
34379
34380 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c
34381 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
34382 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
34383 @@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
34384 static dev_t adsp_devno;
34385 static struct class *adsp_class;
34386
34387 -static struct file_operations adsp_fops = {
34388 +static const struct file_operations adsp_fops = {
34389 .owner = THIS_MODULE,
34390 .open = adsp_open,
34391 .unlocked_ioctl = adsp_ioctl,
34392 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c
34393 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
34394 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
34395 @@ -1022,7 +1022,7 @@ done:
34396 return rc;
34397 }
34398
34399 -static struct file_operations audio_aac_fops = {
34400 +static const struct file_operations audio_aac_fops = {
34401 .owner = THIS_MODULE,
34402 .open = audio_open,
34403 .release = audio_release,
34404 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c
34405 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
34406 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
34407 @@ -833,7 +833,7 @@ done:
34408 return rc;
34409 }
34410
34411 -static struct file_operations audio_amrnb_fops = {
34412 +static const struct file_operations audio_amrnb_fops = {
34413 .owner = THIS_MODULE,
34414 .open = audamrnb_open,
34415 .release = audamrnb_release,
34416 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c
34417 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
34418 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
34419 @@ -805,7 +805,7 @@ dma_fail:
34420 return rc;
34421 }
34422
34423 -static struct file_operations audio_evrc_fops = {
34424 +static const struct file_operations audio_evrc_fops = {
34425 .owner = THIS_MODULE,
34426 .open = audevrc_open,
34427 .release = audevrc_release,
34428 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c
34429 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
34430 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
34431 @@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
34432 return 0;
34433 }
34434
34435 -static struct file_operations audio_fops = {
34436 +static const struct file_operations audio_fops = {
34437 .owner = THIS_MODULE,
34438 .open = audio_in_open,
34439 .release = audio_in_release,
34440 @@ -922,7 +922,7 @@ static struct file_operations audio_fops
34441 .unlocked_ioctl = audio_in_ioctl,
34442 };
34443
34444 -static struct file_operations audpre_fops = {
34445 +static const struct file_operations audpre_fops = {
34446 .owner = THIS_MODULE,
34447 .open = audpre_open,
34448 .unlocked_ioctl = audpre_ioctl,
34449 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c
34450 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
34451 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
34452 @@ -941,7 +941,7 @@ done:
34453 return rc;
34454 }
34455
34456 -static struct file_operations audio_mp3_fops = {
34457 +static const struct file_operations audio_mp3_fops = {
34458 .owner = THIS_MODULE,
34459 .open = audio_open,
34460 .release = audio_release,
34461 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c
34462 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
34463 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
34464 @@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
34465 return 0;
34466 }
34467
34468 -static struct file_operations audio_fops = {
34469 +static const struct file_operations audio_fops = {
34470 .owner = THIS_MODULE,
34471 .open = audio_open,
34472 .release = audio_release,
34473 @@ -819,7 +819,7 @@ static struct file_operations audio_fops
34474 .unlocked_ioctl = audio_ioctl,
34475 };
34476
34477 -static struct file_operations audpp_fops = {
34478 +static const struct file_operations audpp_fops = {
34479 .owner = THIS_MODULE,
34480 .open = audpp_open,
34481 .unlocked_ioctl = audpp_ioctl,
34482 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c
34483 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
34484 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
34485 @@ -816,7 +816,7 @@ err:
34486 return rc;
34487 }
34488
34489 -static struct file_operations audio_qcelp_fops = {
34490 +static const struct file_operations audio_qcelp_fops = {
34491 .owner = THIS_MODULE,
34492 .open = audqcelp_open,
34493 .release = audqcelp_release,
34494 diff -urNp linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c
34495 --- linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
34496 +++ linux-2.6.32.42/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
34497 @@ -242,7 +242,7 @@ err:
34498 return rc;
34499 }
34500
34501 -static struct file_operations snd_fops = {
34502 +static const struct file_operations snd_fops = {
34503 .owner = THIS_MODULE,
34504 .open = snd_open,
34505 .release = snd_release,
34506 diff -urNp linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c
34507 --- linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
34508 +++ linux-2.6.32.42/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
34509 @@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
34510 return 0;
34511 }
34512
34513 -static struct file_operations qmi_fops = {
34514 +static const struct file_operations qmi_fops = {
34515 .owner = THIS_MODULE,
34516 .read = qmi_read,
34517 .write = qmi_write,
34518 diff -urNp linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c
34519 --- linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
34520 +++ linux-2.6.32.42/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
34521 @@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
34522 return rc;
34523 }
34524
34525 -static struct file_operations rpcrouter_server_fops = {
34526 +static const struct file_operations rpcrouter_server_fops = {
34527 .owner = THIS_MODULE,
34528 .open = rpcrouter_open,
34529 .release = rpcrouter_release,
34530 @@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
34531 .unlocked_ioctl = rpcrouter_ioctl,
34532 };
34533
34534 -static struct file_operations rpcrouter_router_fops = {
34535 +static const struct file_operations rpcrouter_router_fops = {
34536 .owner = THIS_MODULE,
34537 .open = rpcrouter_open,
34538 .release = rpcrouter_release,
34539 diff -urNp linux-2.6.32.42/drivers/staging/dst/dcore.c linux-2.6.32.42/drivers/staging/dst/dcore.c
34540 --- linux-2.6.32.42/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
34541 +++ linux-2.6.32.42/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
34542 @@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
34543 return 0;
34544 }
34545
34546 -static struct block_device_operations dst_blk_ops = {
34547 +static const struct block_device_operations dst_blk_ops = {
34548 .open = dst_bdev_open,
34549 .release = dst_bdev_release,
34550 .owner = THIS_MODULE,
34551 @@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
34552 n->size = ctl->size;
34553
34554 atomic_set(&n->refcnt, 1);
34555 - atomic_long_set(&n->gen, 0);
34556 + atomic_long_set_unchecked(&n->gen, 0);
34557 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
34558
34559 err = dst_node_sysfs_init(n);
34560 diff -urNp linux-2.6.32.42/drivers/staging/dst/trans.c linux-2.6.32.42/drivers/staging/dst/trans.c
34561 --- linux-2.6.32.42/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
34562 +++ linux-2.6.32.42/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
34563 @@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
34564 t->error = 0;
34565 t->retries = 0;
34566 atomic_set(&t->refcnt, 1);
34567 - t->gen = atomic_long_inc_return(&n->gen);
34568 + t->gen = atomic_long_inc_return_unchecked(&n->gen);
34569
34570 t->enc = bio_data_dir(bio);
34571 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
34572 diff -urNp linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c
34573 --- linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
34574 +++ linux-2.6.32.42/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
34575 @@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
34576 struct net_device_stats *stats = &etdev->net_stats;
34577
34578 if (pMpTcb->Flags & fMP_DEST_BROAD)
34579 - atomic_inc(&etdev->Stats.brdcstxmt);
34580 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
34581 else if (pMpTcb->Flags & fMP_DEST_MULTI)
34582 - atomic_inc(&etdev->Stats.multixmt);
34583 + atomic_inc_unchecked(&etdev->Stats.multixmt);
34584 else
34585 - atomic_inc(&etdev->Stats.unixmt);
34586 + atomic_inc_unchecked(&etdev->Stats.unixmt);
34587
34588 if (pMpTcb->Packet) {
34589 stats->tx_bytes += pMpTcb->Packet->len;
34590 diff -urNp linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h
34591 --- linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
34592 +++ linux-2.6.32.42/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
34593 @@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
34594 * operations
34595 */
34596 u32 unircv; /* # multicast packets received */
34597 - atomic_t unixmt; /* # multicast packets for Tx */
34598 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
34599 u32 multircv; /* # multicast packets received */
34600 - atomic_t multixmt; /* # multicast packets for Tx */
34601 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
34602 u32 brdcstrcv; /* # broadcast packets received */
34603 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
34604 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
34605 u32 norcvbuf; /* # Rx packets discarded */
34606 u32 noxmtbuf; /* # Tx packets discarded */
34607
34608 diff -urNp linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c
34609 --- linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
34610 +++ linux-2.6.32.42/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
34611 @@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
34612 return 0;
34613 }
34614
34615 -static struct vm_operations_struct go7007_vm_ops = {
34616 +static const struct vm_operations_struct go7007_vm_ops = {
34617 .open = go7007_vm_open,
34618 .close = go7007_vm_close,
34619 .fault = go7007_vm_fault,
34620 diff -urNp linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c
34621 --- linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
34622 +++ linux-2.6.32.42/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
34623 @@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
34624 /* The one and only one */
34625 static struct blkvsc_driver_context g_blkvsc_drv;
34626
34627 -static struct block_device_operations block_ops = {
34628 +static const struct block_device_operations block_ops = {
34629 .owner = THIS_MODULE,
34630 .open = blkvsc_open,
34631 .release = blkvsc_release,
34632 diff -urNp linux-2.6.32.42/drivers/staging/hv/Channel.c linux-2.6.32.42/drivers/staging/hv/Channel.c
34633 --- linux-2.6.32.42/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
34634 +++ linux-2.6.32.42/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
34635 @@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
34636
34637 DPRINT_ENTER(VMBUS);
34638
34639 - nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
34640 - atomic_inc(&gVmbusConnection.NextGpadlHandle);
34641 + nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
34642 + atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
34643
34644 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
34645 ASSERT(msgInfo != NULL);
34646 diff -urNp linux-2.6.32.42/drivers/staging/hv/Hv.c linux-2.6.32.42/drivers/staging/hv/Hv.c
34647 --- linux-2.6.32.42/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
34648 +++ linux-2.6.32.42/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
34649 @@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
34650 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
34651 u32 outputAddressHi = outputAddress >> 32;
34652 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
34653 - volatile void *hypercallPage = gHvContext.HypercallPage;
34654 + volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
34655
34656 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
34657 Control, Input, Output);
34658 diff -urNp linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c
34659 --- linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
34660 +++ linux-2.6.32.42/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
34661 @@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
34662 to_device_context(root_device_obj);
34663 struct device_context *child_device_ctx =
34664 to_device_context(child_device_obj);
34665 - static atomic_t device_num = ATOMIC_INIT(0);
34666 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
34667
34668 DPRINT_ENTER(VMBUS_DRV);
34669
34670 @@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
34671
34672 /* Set the device name. Otherwise, device_register() will fail. */
34673 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
34674 - atomic_inc_return(&device_num));
34675 + atomic_inc_return_unchecked(&device_num));
34676
34677 /* The new device belongs to this bus */
34678 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
34679 diff -urNp linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h
34680 --- linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
34681 +++ linux-2.6.32.42/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
34682 @@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
34683 struct VMBUS_CONNECTION {
34684 enum VMBUS_CONNECT_STATE ConnectState;
34685
34686 - atomic_t NextGpadlHandle;
34687 + atomic_unchecked_t NextGpadlHandle;
34688
34689 /*
34690 * Represents channel interrupts. Each bit position represents a
34691 diff -urNp linux-2.6.32.42/drivers/staging/octeon/ethernet.c linux-2.6.32.42/drivers/staging/octeon/ethernet.c
34692 --- linux-2.6.32.42/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
34693 +++ linux-2.6.32.42/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
34694 @@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
34695 * since the RX tasklet also increments it.
34696 */
34697 #ifdef CONFIG_64BIT
34698 - atomic64_add(rx_status.dropped_packets,
34699 - (atomic64_t *)&priv->stats.rx_dropped);
34700 + atomic64_add_unchecked(rx_status.dropped_packets,
34701 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
34702 #else
34703 - atomic_add(rx_status.dropped_packets,
34704 - (atomic_t *)&priv->stats.rx_dropped);
34705 + atomic_add_unchecked(rx_status.dropped_packets,
34706 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
34707 #endif
34708 }
34709
34710 diff -urNp linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c
34711 --- linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
34712 +++ linux-2.6.32.42/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
34713 @@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
34714 /* Increment RX stats for virtual ports */
34715 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
34716 #ifdef CONFIG_64BIT
34717 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
34718 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
34719 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
34720 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
34721 #else
34722 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
34723 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
34724 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
34725 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
34726 #endif
34727 }
34728 netif_receive_skb(skb);
34729 @@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
34730 dev->name);
34731 */
34732 #ifdef CONFIG_64BIT
34733 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
34734 + atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
34735 #else
34736 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
34737 + atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
34738 #endif
34739 dev_kfree_skb_irq(skb);
34740 }
34741 diff -urNp linux-2.6.32.42/drivers/staging/panel/panel.c linux-2.6.32.42/drivers/staging/panel/panel.c
34742 --- linux-2.6.32.42/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
34743 +++ linux-2.6.32.42/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
34744 @@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
34745 return 0;
34746 }
34747
34748 -static struct file_operations lcd_fops = {
34749 +static const struct file_operations lcd_fops = {
34750 .write = lcd_write,
34751 .open = lcd_open,
34752 .release = lcd_release,
34753 @@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
34754 return 0;
34755 }
34756
34757 -static struct file_operations keypad_fops = {
34758 +static const struct file_operations keypad_fops = {
34759 .read = keypad_read, /* read */
34760 .open = keypad_open, /* open */
34761 .release = keypad_release, /* close */
34762 diff -urNp linux-2.6.32.42/drivers/staging/phison/phison.c linux-2.6.32.42/drivers/staging/phison/phison.c
34763 --- linux-2.6.32.42/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
34764 +++ linux-2.6.32.42/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
34765 @@ -43,7 +43,7 @@ static struct scsi_host_template phison_
34766 ATA_BMDMA_SHT(DRV_NAME),
34767 };
34768
34769 -static struct ata_port_operations phison_ops = {
34770 +static const struct ata_port_operations phison_ops = {
34771 .inherits = &ata_bmdma_port_ops,
34772 .prereset = phison_pre_reset,
34773 };
34774 diff -urNp linux-2.6.32.42/drivers/staging/poch/poch.c linux-2.6.32.42/drivers/staging/poch/poch.c
34775 --- linux-2.6.32.42/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
34776 +++ linux-2.6.32.42/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
34777 @@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
34778 return 0;
34779 }
34780
34781 -static struct file_operations poch_fops = {
34782 +static const struct file_operations poch_fops = {
34783 .owner = THIS_MODULE,
34784 .open = poch_open,
34785 .release = poch_release,
34786 diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/inode.c linux-2.6.32.42/drivers/staging/pohmelfs/inode.c
34787 --- linux-2.6.32.42/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
34788 +++ linux-2.6.32.42/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
34789 @@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
34790 mutex_init(&psb->mcache_lock);
34791 psb->mcache_root = RB_ROOT;
34792 psb->mcache_timeout = msecs_to_jiffies(5000);
34793 - atomic_long_set(&psb->mcache_gen, 0);
34794 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
34795
34796 psb->trans_max_pages = 100;
34797
34798 @@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
34799 INIT_LIST_HEAD(&psb->crypto_ready_list);
34800 INIT_LIST_HEAD(&psb->crypto_active_list);
34801
34802 - atomic_set(&psb->trans_gen, 1);
34803 + atomic_set_unchecked(&psb->trans_gen, 1);
34804 atomic_long_set(&psb->total_inodes, 0);
34805
34806 mutex_init(&psb->state_lock);
34807 diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c
34808 --- linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
34809 +++ linux-2.6.32.42/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
34810 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
34811 m->data = data;
34812 m->start = start;
34813 m->size = size;
34814 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
34815 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
34816
34817 mutex_lock(&psb->mcache_lock);
34818 err = pohmelfs_mcache_insert(psb, m);
34819 diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h
34820 --- linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
34821 +++ linux-2.6.32.42/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
34822 @@ -570,14 +570,14 @@ struct pohmelfs_config;
34823 struct pohmelfs_sb {
34824 struct rb_root mcache_root;
34825 struct mutex mcache_lock;
34826 - atomic_long_t mcache_gen;
34827 + atomic_long_unchecked_t mcache_gen;
34828 unsigned long mcache_timeout;
34829
34830 unsigned int idx;
34831
34832 unsigned int trans_retries;
34833
34834 - atomic_t trans_gen;
34835 + atomic_unchecked_t trans_gen;
34836
34837 unsigned int crypto_attached_size;
34838 unsigned int crypto_align_size;
34839 diff -urNp linux-2.6.32.42/drivers/staging/pohmelfs/trans.c linux-2.6.32.42/drivers/staging/pohmelfs/trans.c
34840 --- linux-2.6.32.42/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
34841 +++ linux-2.6.32.42/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
34842 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
34843 int err;
34844 struct netfs_cmd *cmd = t->iovec.iov_base;
34845
34846 - t->gen = atomic_inc_return(&psb->trans_gen);
34847 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
34848
34849 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
34850 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
34851 diff -urNp linux-2.6.32.42/drivers/staging/sep/sep_driver.c linux-2.6.32.42/drivers/staging/sep/sep_driver.c
34852 --- linux-2.6.32.42/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
34853 +++ linux-2.6.32.42/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
34854 @@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
34855 static dev_t sep_devno;
34856
34857 /* the files operations structure of the driver */
34858 -static struct file_operations sep_file_operations = {
34859 +static const struct file_operations sep_file_operations = {
34860 .owner = THIS_MODULE,
34861 .ioctl = sep_ioctl,
34862 .poll = sep_poll,
34863 diff -urNp linux-2.6.32.42/drivers/staging/usbip/vhci.h linux-2.6.32.42/drivers/staging/usbip/vhci.h
34864 --- linux-2.6.32.42/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
34865 +++ linux-2.6.32.42/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
34866 @@ -92,7 +92,7 @@ struct vhci_hcd {
34867 unsigned resuming:1;
34868 unsigned long re_timeout;
34869
34870 - atomic_t seqnum;
34871 + atomic_unchecked_t seqnum;
34872
34873 /*
34874 * NOTE:
34875 diff -urNp linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c
34876 --- linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
34877 +++ linux-2.6.32.42/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
34878 @@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
34879 return;
34880 }
34881
34882 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
34883 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34884 if (priv->seqnum == 0xffff)
34885 usbip_uinfo("seqnum max\n");
34886
34887 @@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
34888 return -ENOMEM;
34889 }
34890
34891 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
34892 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
34893 if (unlink->seqnum == 0xffff)
34894 usbip_uinfo("seqnum max\n");
34895
34896 @@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
34897 vdev->rhport = rhport;
34898 }
34899
34900 - atomic_set(&vhci->seqnum, 0);
34901 + atomic_set_unchecked(&vhci->seqnum, 0);
34902 spin_lock_init(&vhci->lock);
34903
34904
34905 diff -urNp linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c
34906 --- linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
34907 +++ linux-2.6.32.42/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
34908 @@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
34909 usbip_uerr("cannot find a urb of seqnum %u\n",
34910 pdu->base.seqnum);
34911 usbip_uinfo("max seqnum %d\n",
34912 - atomic_read(&the_controller->seqnum));
34913 + atomic_read_unchecked(&the_controller->seqnum));
34914 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
34915 return;
34916 }
34917 diff -urNp linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c
34918 --- linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
34919 +++ linux-2.6.32.42/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
34920 @@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
34921 static int __init vme_user_probe(struct device *, int, int);
34922 static int __exit vme_user_remove(struct device *, int, int);
34923
34924 -static struct file_operations vme_user_fops = {
34925 +static const struct file_operations vme_user_fops = {
34926 .open = vme_user_open,
34927 .release = vme_user_release,
34928 .read = vme_user_read,
34929 diff -urNp linux-2.6.32.42/drivers/telephony/ixj.c linux-2.6.32.42/drivers/telephony/ixj.c
34930 --- linux-2.6.32.42/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
34931 +++ linux-2.6.32.42/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
34932 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
34933 bool mContinue;
34934 char *pIn, *pOut;
34935
34936 + pax_track_stack();
34937 +
34938 if (!SCI_Prepare(j))
34939 return 0;
34940
34941 diff -urNp linux-2.6.32.42/drivers/uio/uio.c linux-2.6.32.42/drivers/uio/uio.c
34942 --- linux-2.6.32.42/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
34943 +++ linux-2.6.32.42/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
34944 @@ -23,6 +23,7 @@
34945 #include <linux/string.h>
34946 #include <linux/kobject.h>
34947 #include <linux/uio_driver.h>
34948 +#include <asm/local.h>
34949
34950 #define UIO_MAX_DEVICES 255
34951
34952 @@ -30,10 +31,10 @@ struct uio_device {
34953 struct module *owner;
34954 struct device *dev;
34955 int minor;
34956 - atomic_t event;
34957 + atomic_unchecked_t event;
34958 struct fasync_struct *async_queue;
34959 wait_queue_head_t wait;
34960 - int vma_count;
34961 + local_t vma_count;
34962 struct uio_info *info;
34963 struct kobject *map_dir;
34964 struct kobject *portio_dir;
34965 @@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
34966 return entry->show(mem, buf);
34967 }
34968
34969 -static struct sysfs_ops map_sysfs_ops = {
34970 +static const struct sysfs_ops map_sysfs_ops = {
34971 .show = map_type_show,
34972 };
34973
34974 @@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
34975 return entry->show(port, buf);
34976 }
34977
34978 -static struct sysfs_ops portio_sysfs_ops = {
34979 +static const struct sysfs_ops portio_sysfs_ops = {
34980 .show = portio_type_show,
34981 };
34982
34983 @@ -255,7 +256,7 @@ static ssize_t show_event(struct device
34984 struct uio_device *idev = dev_get_drvdata(dev);
34985 if (idev)
34986 return sprintf(buf, "%u\n",
34987 - (unsigned int)atomic_read(&idev->event));
34988 + (unsigned int)atomic_read_unchecked(&idev->event));
34989 else
34990 return -ENODEV;
34991 }
34992 @@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
34993 {
34994 struct uio_device *idev = info->uio_dev;
34995
34996 - atomic_inc(&idev->event);
34997 + atomic_inc_unchecked(&idev->event);
34998 wake_up_interruptible(&idev->wait);
34999 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
35000 }
35001 @@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
35002 }
35003
35004 listener->dev = idev;
35005 - listener->event_count = atomic_read(&idev->event);
35006 + listener->event_count = atomic_read_unchecked(&idev->event);
35007 filep->private_data = listener;
35008
35009 if (idev->info->open) {
35010 @@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
35011 return -EIO;
35012
35013 poll_wait(filep, &idev->wait, wait);
35014 - if (listener->event_count != atomic_read(&idev->event))
35015 + if (listener->event_count != atomic_read_unchecked(&idev->event))
35016 return POLLIN | POLLRDNORM;
35017 return 0;
35018 }
35019 @@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
35020 do {
35021 set_current_state(TASK_INTERRUPTIBLE);
35022
35023 - event_count = atomic_read(&idev->event);
35024 + event_count = atomic_read_unchecked(&idev->event);
35025 if (event_count != listener->event_count) {
35026 if (copy_to_user(buf, &event_count, count))
35027 retval = -EFAULT;
35028 @@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
35029 static void uio_vma_open(struct vm_area_struct *vma)
35030 {
35031 struct uio_device *idev = vma->vm_private_data;
35032 - idev->vma_count++;
35033 + local_inc(&idev->vma_count);
35034 }
35035
35036 static void uio_vma_close(struct vm_area_struct *vma)
35037 {
35038 struct uio_device *idev = vma->vm_private_data;
35039 - idev->vma_count--;
35040 + local_dec(&idev->vma_count);
35041 }
35042
35043 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35044 @@ -840,7 +841,7 @@ int __uio_register_device(struct module
35045 idev->owner = owner;
35046 idev->info = info;
35047 init_waitqueue_head(&idev->wait);
35048 - atomic_set(&idev->event, 0);
35049 + atomic_set_unchecked(&idev->event, 0);
35050
35051 ret = uio_get_minor(idev);
35052 if (ret)
35053 diff -urNp linux-2.6.32.42/drivers/usb/atm/usbatm.c linux-2.6.32.42/drivers/usb/atm/usbatm.c
35054 --- linux-2.6.32.42/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
35055 +++ linux-2.6.32.42/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
35056 @@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
35057 if (printk_ratelimit())
35058 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
35059 __func__, vpi, vci);
35060 - atomic_inc(&vcc->stats->rx_err);
35061 + atomic_inc_unchecked(&vcc->stats->rx_err);
35062 return;
35063 }
35064
35065 @@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
35066 if (length > ATM_MAX_AAL5_PDU) {
35067 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
35068 __func__, length, vcc);
35069 - atomic_inc(&vcc->stats->rx_err);
35070 + atomic_inc_unchecked(&vcc->stats->rx_err);
35071 goto out;
35072 }
35073
35074 @@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
35075 if (sarb->len < pdu_length) {
35076 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
35077 __func__, pdu_length, sarb->len, vcc);
35078 - atomic_inc(&vcc->stats->rx_err);
35079 + atomic_inc_unchecked(&vcc->stats->rx_err);
35080 goto out;
35081 }
35082
35083 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
35084 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
35085 __func__, vcc);
35086 - atomic_inc(&vcc->stats->rx_err);
35087 + atomic_inc_unchecked(&vcc->stats->rx_err);
35088 goto out;
35089 }
35090
35091 @@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
35092 if (printk_ratelimit())
35093 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
35094 __func__, length);
35095 - atomic_inc(&vcc->stats->rx_drop);
35096 + atomic_inc_unchecked(&vcc->stats->rx_drop);
35097 goto out;
35098 }
35099
35100 @@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
35101
35102 vcc->push(vcc, skb);
35103
35104 - atomic_inc(&vcc->stats->rx);
35105 + atomic_inc_unchecked(&vcc->stats->rx);
35106 out:
35107 skb_trim(sarb, 0);
35108 }
35109 @@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
35110 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
35111
35112 usbatm_pop(vcc, skb);
35113 - atomic_inc(&vcc->stats->tx);
35114 + atomic_inc_unchecked(&vcc->stats->tx);
35115
35116 skb = skb_dequeue(&instance->sndqueue);
35117 }
35118 @@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
35119 if (!left--)
35120 return sprintf(page,
35121 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
35122 - atomic_read(&atm_dev->stats.aal5.tx),
35123 - atomic_read(&atm_dev->stats.aal5.tx_err),
35124 - atomic_read(&atm_dev->stats.aal5.rx),
35125 - atomic_read(&atm_dev->stats.aal5.rx_err),
35126 - atomic_read(&atm_dev->stats.aal5.rx_drop));
35127 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
35128 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
35129 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
35130 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
35131 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
35132
35133 if (!left--) {
35134 if (instance->disconnected)
35135 diff -urNp linux-2.6.32.42/drivers/usb/class/cdc-wdm.c linux-2.6.32.42/drivers/usb/class/cdc-wdm.c
35136 --- linux-2.6.32.42/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
35137 +++ linux-2.6.32.42/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
35138 @@ -314,7 +314,7 @@ static ssize_t wdm_write
35139 if (r < 0)
35140 goto outnp;
35141
35142 - if (!file->f_flags && O_NONBLOCK)
35143 + if (!(file->f_flags & O_NONBLOCK))
35144 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
35145 &desc->flags));
35146 else
35147 diff -urNp linux-2.6.32.42/drivers/usb/core/hcd.c linux-2.6.32.42/drivers/usb/core/hcd.c
35148 --- linux-2.6.32.42/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
35149 +++ linux-2.6.32.42/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
35150 @@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
35151
35152 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35153
35154 -struct usb_mon_operations *mon_ops;
35155 +const struct usb_mon_operations *mon_ops;
35156
35157 /*
35158 * The registration is unlocked.
35159 @@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
35160 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
35161 */
35162
35163 -int usb_mon_register (struct usb_mon_operations *ops)
35164 +int usb_mon_register (const struct usb_mon_operations *ops)
35165 {
35166
35167 if (mon_ops)
35168 diff -urNp linux-2.6.32.42/drivers/usb/core/hcd.h linux-2.6.32.42/drivers/usb/core/hcd.h
35169 --- linux-2.6.32.42/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
35170 +++ linux-2.6.32.42/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
35171 @@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
35172 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35173
35174 struct usb_mon_operations {
35175 - void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
35176 - void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35177 - void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35178 + void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
35179 + void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35180 + void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35181 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
35182 };
35183
35184 -extern struct usb_mon_operations *mon_ops;
35185 +extern const struct usb_mon_operations *mon_ops;
35186
35187 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
35188 {
35189 @@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
35190 (*mon_ops->urb_complete)(bus, urb, status);
35191 }
35192
35193 -int usb_mon_register(struct usb_mon_operations *ops);
35194 +int usb_mon_register(const struct usb_mon_operations *ops);
35195 void usb_mon_deregister(void);
35196
35197 #else
35198 diff -urNp linux-2.6.32.42/drivers/usb/core/message.c linux-2.6.32.42/drivers/usb/core/message.c
35199 --- linux-2.6.32.42/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
35200 +++ linux-2.6.32.42/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
35201 @@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
35202 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
35203 if (buf) {
35204 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
35205 - if (len > 0) {
35206 - smallbuf = kmalloc(++len, GFP_NOIO);
35207 + if (len++ > 0) {
35208 + smallbuf = kmalloc(len, GFP_NOIO);
35209 if (!smallbuf)
35210 return buf;
35211 memcpy(smallbuf, buf, len);
35212 diff -urNp linux-2.6.32.42/drivers/usb/misc/appledisplay.c linux-2.6.32.42/drivers/usb/misc/appledisplay.c
35213 --- linux-2.6.32.42/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
35214 +++ linux-2.6.32.42/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
35215 @@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
35216 return pdata->msgdata[1];
35217 }
35218
35219 -static struct backlight_ops appledisplay_bl_data = {
35220 +static const struct backlight_ops appledisplay_bl_data = {
35221 .get_brightness = appledisplay_bl_get_brightness,
35222 .update_status = appledisplay_bl_update_status,
35223 };
35224 diff -urNp linux-2.6.32.42/drivers/usb/mon/mon_main.c linux-2.6.32.42/drivers/usb/mon/mon_main.c
35225 --- linux-2.6.32.42/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
35226 +++ linux-2.6.32.42/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
35227 @@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
35228 /*
35229 * Ops
35230 */
35231 -static struct usb_mon_operations mon_ops_0 = {
35232 +static const struct usb_mon_operations mon_ops_0 = {
35233 .urb_submit = mon_submit,
35234 .urb_submit_error = mon_submit_error,
35235 .urb_complete = mon_complete,
35236 diff -urNp linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h
35237 --- linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
35238 +++ linux-2.6.32.42/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
35239 @@ -192,7 +192,7 @@ struct wahc {
35240 struct list_head xfer_delayed_list;
35241 spinlock_t xfer_list_lock;
35242 struct work_struct xfer_work;
35243 - atomic_t xfer_id_count;
35244 + atomic_unchecked_t xfer_id_count;
35245 };
35246
35247
35248 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
35249 INIT_LIST_HEAD(&wa->xfer_delayed_list);
35250 spin_lock_init(&wa->xfer_list_lock);
35251 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
35252 - atomic_set(&wa->xfer_id_count, 1);
35253 + atomic_set_unchecked(&wa->xfer_id_count, 1);
35254 }
35255
35256 /**
35257 diff -urNp linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c
35258 --- linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
35259 +++ linux-2.6.32.42/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
35260 @@ -293,7 +293,7 @@ out:
35261 */
35262 static void wa_xfer_id_init(struct wa_xfer *xfer)
35263 {
35264 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
35265 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
35266 }
35267
35268 /*
35269 diff -urNp linux-2.6.32.42/drivers/uwb/wlp/messages.c linux-2.6.32.42/drivers/uwb/wlp/messages.c
35270 --- linux-2.6.32.42/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
35271 +++ linux-2.6.32.42/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
35272 @@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
35273 size_t len = skb->len;
35274 size_t used;
35275 ssize_t result;
35276 - struct wlp_nonce enonce, rnonce;
35277 + struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
35278 enum wlp_assc_error assc_err;
35279 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
35280 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
35281 diff -urNp linux-2.6.32.42/drivers/uwb/wlp/sysfs.c linux-2.6.32.42/drivers/uwb/wlp/sysfs.c
35282 --- linux-2.6.32.42/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
35283 +++ linux-2.6.32.42/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
35284 @@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
35285 return ret;
35286 }
35287
35288 -static
35289 -struct sysfs_ops wss_sysfs_ops = {
35290 +static const struct sysfs_ops wss_sysfs_ops = {
35291 .show = wlp_wss_attr_show,
35292 .store = wlp_wss_attr_store,
35293 };
35294 diff -urNp linux-2.6.32.42/drivers/video/atmel_lcdfb.c linux-2.6.32.42/drivers/video/atmel_lcdfb.c
35295 --- linux-2.6.32.42/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
35296 +++ linux-2.6.32.42/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
35297 @@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
35298 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
35299 }
35300
35301 -static struct backlight_ops atmel_lcdc_bl_ops = {
35302 +static const struct backlight_ops atmel_lcdc_bl_ops = {
35303 .update_status = atmel_bl_update_status,
35304 .get_brightness = atmel_bl_get_brightness,
35305 };
35306 diff -urNp linux-2.6.32.42/drivers/video/aty/aty128fb.c linux-2.6.32.42/drivers/video/aty/aty128fb.c
35307 --- linux-2.6.32.42/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
35308 +++ linux-2.6.32.42/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
35309 @@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
35310 return bd->props.brightness;
35311 }
35312
35313 -static struct backlight_ops aty128_bl_data = {
35314 +static const struct backlight_ops aty128_bl_data = {
35315 .get_brightness = aty128_bl_get_brightness,
35316 .update_status = aty128_bl_update_status,
35317 };
35318 diff -urNp linux-2.6.32.42/drivers/video/aty/atyfb_base.c linux-2.6.32.42/drivers/video/aty/atyfb_base.c
35319 --- linux-2.6.32.42/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
35320 +++ linux-2.6.32.42/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
35321 @@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
35322 return bd->props.brightness;
35323 }
35324
35325 -static struct backlight_ops aty_bl_data = {
35326 +static const struct backlight_ops aty_bl_data = {
35327 .get_brightness = aty_bl_get_brightness,
35328 .update_status = aty_bl_update_status,
35329 };
35330 diff -urNp linux-2.6.32.42/drivers/video/aty/radeon_backlight.c linux-2.6.32.42/drivers/video/aty/radeon_backlight.c
35331 --- linux-2.6.32.42/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
35332 +++ linux-2.6.32.42/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
35333 @@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
35334 return bd->props.brightness;
35335 }
35336
35337 -static struct backlight_ops radeon_bl_data = {
35338 +static const struct backlight_ops radeon_bl_data = {
35339 .get_brightness = radeon_bl_get_brightness,
35340 .update_status = radeon_bl_update_status,
35341 };
35342 diff -urNp linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c
35343 --- linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
35344 +++ linux-2.6.32.42/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
35345 @@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
35346 return error ? data->current_brightness : reg_val;
35347 }
35348
35349 -static struct backlight_ops adp5520_bl_ops = {
35350 +static const struct backlight_ops adp5520_bl_ops = {
35351 .update_status = adp5520_bl_update_status,
35352 .get_brightness = adp5520_bl_get_brightness,
35353 };
35354 diff -urNp linux-2.6.32.42/drivers/video/backlight/adx_bl.c linux-2.6.32.42/drivers/video/backlight/adx_bl.c
35355 --- linux-2.6.32.42/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
35356 +++ linux-2.6.32.42/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
35357 @@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
35358 return 1;
35359 }
35360
35361 -static struct backlight_ops adx_backlight_ops = {
35362 +static const struct backlight_ops adx_backlight_ops = {
35363 .options = 0,
35364 .update_status = adx_backlight_update_status,
35365 .get_brightness = adx_backlight_get_brightness,
35366 diff -urNp linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c
35367 --- linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
35368 +++ linux-2.6.32.42/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
35369 @@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
35370 return pwm_channel_enable(&pwmbl->pwmc);
35371 }
35372
35373 -static struct backlight_ops atmel_pwm_bl_ops = {
35374 +static const struct backlight_ops atmel_pwm_bl_ops = {
35375 .get_brightness = atmel_pwm_bl_get_intensity,
35376 .update_status = atmel_pwm_bl_set_intensity,
35377 };
35378 diff -urNp linux-2.6.32.42/drivers/video/backlight/backlight.c linux-2.6.32.42/drivers/video/backlight/backlight.c
35379 --- linux-2.6.32.42/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
35380 +++ linux-2.6.32.42/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
35381 @@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
35382 * ERR_PTR() or a pointer to the newly allocated device.
35383 */
35384 struct backlight_device *backlight_device_register(const char *name,
35385 - struct device *parent, void *devdata, struct backlight_ops *ops)
35386 + struct device *parent, void *devdata, const struct backlight_ops *ops)
35387 {
35388 struct backlight_device *new_bd;
35389 int rc;
35390 diff -urNp linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c
35391 --- linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
35392 +++ linux-2.6.32.42/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
35393 @@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
35394 }
35395 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
35396
35397 -static struct backlight_ops corgi_bl_ops = {
35398 +static const struct backlight_ops corgi_bl_ops = {
35399 .get_brightness = corgi_bl_get_intensity,
35400 .update_status = corgi_bl_update_status,
35401 };
35402 diff -urNp linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c
35403 --- linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
35404 +++ linux-2.6.32.42/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
35405 @@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
35406 return intensity;
35407 }
35408
35409 -static struct backlight_ops cr_backlight_ops = {
35410 +static const struct backlight_ops cr_backlight_ops = {
35411 .get_brightness = cr_backlight_get_intensity,
35412 .update_status = cr_backlight_set_intensity,
35413 };
35414 diff -urNp linux-2.6.32.42/drivers/video/backlight/da903x_bl.c linux-2.6.32.42/drivers/video/backlight/da903x_bl.c
35415 --- linux-2.6.32.42/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
35416 +++ linux-2.6.32.42/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
35417 @@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
35418 return data->current_brightness;
35419 }
35420
35421 -static struct backlight_ops da903x_backlight_ops = {
35422 +static const struct backlight_ops da903x_backlight_ops = {
35423 .update_status = da903x_backlight_update_status,
35424 .get_brightness = da903x_backlight_get_brightness,
35425 };
35426 diff -urNp linux-2.6.32.42/drivers/video/backlight/generic_bl.c linux-2.6.32.42/drivers/video/backlight/generic_bl.c
35427 --- linux-2.6.32.42/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
35428 +++ linux-2.6.32.42/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
35429 @@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
35430 }
35431 EXPORT_SYMBOL(corgibl_limit_intensity);
35432
35433 -static struct backlight_ops genericbl_ops = {
35434 +static const struct backlight_ops genericbl_ops = {
35435 .options = BL_CORE_SUSPENDRESUME,
35436 .get_brightness = genericbl_get_intensity,
35437 .update_status = genericbl_send_intensity,
35438 diff -urNp linux-2.6.32.42/drivers/video/backlight/hp680_bl.c linux-2.6.32.42/drivers/video/backlight/hp680_bl.c
35439 --- linux-2.6.32.42/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
35440 +++ linux-2.6.32.42/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
35441 @@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
35442 return current_intensity;
35443 }
35444
35445 -static struct backlight_ops hp680bl_ops = {
35446 +static const struct backlight_ops hp680bl_ops = {
35447 .get_brightness = hp680bl_get_intensity,
35448 .update_status = hp680bl_set_intensity,
35449 };
35450 diff -urNp linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c
35451 --- linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
35452 +++ linux-2.6.32.42/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
35453 @@ -93,7 +93,7 @@ out:
35454 return ret;
35455 }
35456
35457 -static struct backlight_ops jornada_bl_ops = {
35458 +static const struct backlight_ops jornada_bl_ops = {
35459 .get_brightness = jornada_bl_get_brightness,
35460 .update_status = jornada_bl_update_status,
35461 .options = BL_CORE_SUSPENDRESUME,
35462 diff -urNp linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c
35463 --- linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
35464 +++ linux-2.6.32.42/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
35465 @@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
35466 return kb3886bl_intensity;
35467 }
35468
35469 -static struct backlight_ops kb3886bl_ops = {
35470 +static const struct backlight_ops kb3886bl_ops = {
35471 .get_brightness = kb3886bl_get_intensity,
35472 .update_status = kb3886bl_send_intensity,
35473 };
35474 diff -urNp linux-2.6.32.42/drivers/video/backlight/locomolcd.c linux-2.6.32.42/drivers/video/backlight/locomolcd.c
35475 --- linux-2.6.32.42/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
35476 +++ linux-2.6.32.42/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
35477 @@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
35478 return current_intensity;
35479 }
35480
35481 -static struct backlight_ops locomobl_data = {
35482 +static const struct backlight_ops locomobl_data = {
35483 .get_brightness = locomolcd_get_intensity,
35484 .update_status = locomolcd_set_intensity,
35485 };
35486 diff -urNp linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c
35487 --- linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
35488 +++ linux-2.6.32.42/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
35489 @@ -33,7 +33,7 @@ struct dmi_match_data {
35490 unsigned long iostart;
35491 unsigned long iolen;
35492 /* Backlight operations structure. */
35493 - struct backlight_ops backlight_ops;
35494 + const struct backlight_ops backlight_ops;
35495 };
35496
35497 /* Module parameters. */
35498 diff -urNp linux-2.6.32.42/drivers/video/backlight/omap1_bl.c linux-2.6.32.42/drivers/video/backlight/omap1_bl.c
35499 --- linux-2.6.32.42/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
35500 +++ linux-2.6.32.42/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
35501 @@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
35502 return bl->current_intensity;
35503 }
35504
35505 -static struct backlight_ops omapbl_ops = {
35506 +static const struct backlight_ops omapbl_ops = {
35507 .get_brightness = omapbl_get_intensity,
35508 .update_status = omapbl_update_status,
35509 };
35510 diff -urNp linux-2.6.32.42/drivers/video/backlight/progear_bl.c linux-2.6.32.42/drivers/video/backlight/progear_bl.c
35511 --- linux-2.6.32.42/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
35512 +++ linux-2.6.32.42/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
35513 @@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
35514 return intensity - HW_LEVEL_MIN;
35515 }
35516
35517 -static struct backlight_ops progearbl_ops = {
35518 +static const struct backlight_ops progearbl_ops = {
35519 .get_brightness = progearbl_get_intensity,
35520 .update_status = progearbl_set_intensity,
35521 };
35522 diff -urNp linux-2.6.32.42/drivers/video/backlight/pwm_bl.c linux-2.6.32.42/drivers/video/backlight/pwm_bl.c
35523 --- linux-2.6.32.42/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
35524 +++ linux-2.6.32.42/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
35525 @@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
35526 return bl->props.brightness;
35527 }
35528
35529 -static struct backlight_ops pwm_backlight_ops = {
35530 +static const struct backlight_ops pwm_backlight_ops = {
35531 .update_status = pwm_backlight_update_status,
35532 .get_brightness = pwm_backlight_get_brightness,
35533 };
35534 diff -urNp linux-2.6.32.42/drivers/video/backlight/tosa_bl.c linux-2.6.32.42/drivers/video/backlight/tosa_bl.c
35535 --- linux-2.6.32.42/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
35536 +++ linux-2.6.32.42/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
35537 @@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
35538 return props->brightness;
35539 }
35540
35541 -static struct backlight_ops bl_ops = {
35542 +static const struct backlight_ops bl_ops = {
35543 .get_brightness = tosa_bl_get_brightness,
35544 .update_status = tosa_bl_update_status,
35545 };
35546 diff -urNp linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c
35547 --- linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
35548 +++ linux-2.6.32.42/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
35549 @@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
35550 return data->current_brightness;
35551 }
35552
35553 -static struct backlight_ops wm831x_backlight_ops = {
35554 +static const struct backlight_ops wm831x_backlight_ops = {
35555 .options = BL_CORE_SUSPENDRESUME,
35556 .update_status = wm831x_backlight_update_status,
35557 .get_brightness = wm831x_backlight_get_brightness,
35558 diff -urNp linux-2.6.32.42/drivers/video/bf54x-lq043fb.c linux-2.6.32.42/drivers/video/bf54x-lq043fb.c
35559 --- linux-2.6.32.42/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
35560 +++ linux-2.6.32.42/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
35561 @@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
35562 return 0;
35563 }
35564
35565 -static struct backlight_ops bfin_lq043fb_bl_ops = {
35566 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
35567 .get_brightness = bl_get_brightness,
35568 };
35569
35570 diff -urNp linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c
35571 --- linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
35572 +++ linux-2.6.32.42/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
35573 @@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
35574 return 0;
35575 }
35576
35577 -static struct backlight_ops bfin_lq043fb_bl_ops = {
35578 +static const struct backlight_ops bfin_lq043fb_bl_ops = {
35579 .get_brightness = bl_get_brightness,
35580 };
35581
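
The backlight hunks above all apply the same constification pattern: an ops table of function pointers is declared const so it lands in a read-only section and cannot be retargeted at runtime. A minimal user-space sketch of that pattern follows; the names (demo_ops, demo_get, demo_update) are hypothetical and not from the kernel tree.

#include <stdio.h>

struct demo_ops {
        int (*get_brightness)(void);
        int (*update_status)(int level);
};

static int demo_get(void)
{
        return 42;
}

static int demo_update(int level)
{
        return level;
}

/* const: the pointer table is emitted into a read-only section, so a stray
 * (or malicious) write can no longer redirect the callbacks at runtime. */
static const struct demo_ops demo_ops_table = {
        .get_brightness = demo_get,
        .update_status  = demo_update,
};

int main(void)
{
        printf("brightness: %d\n", demo_ops_table.get_brightness());
        return 0;
}

The kernel-side effect is analogous: a const backlight_ops typically ends up in .rodata, and backlight_device_register() is changed above to accept the const-qualified pointer so callers can pass such tables.
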
35582 diff -urNp linux-2.6.32.42/drivers/video/fbcmap.c linux-2.6.32.42/drivers/video/fbcmap.c
35583 --- linux-2.6.32.42/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
35584 +++ linux-2.6.32.42/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
35585 @@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
35586 rc = -ENODEV;
35587 goto out;
35588 }
35589 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
35590 - !info->fbops->fb_setcmap)) {
35591 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
35592 rc = -EINVAL;
35593 goto out1;
35594 }
35595 diff -urNp linux-2.6.32.42/drivers/video/fbmem.c linux-2.6.32.42/drivers/video/fbmem.c
35596 --- linux-2.6.32.42/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
35597 +++ linux-2.6.32.42/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
35598 @@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
35599 image->dx += image->width + 8;
35600 }
35601 } else if (rotate == FB_ROTATE_UD) {
35602 - for (x = 0; x < num && image->dx >= 0; x++) {
35603 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
35604 info->fbops->fb_imageblit(info, image);
35605 image->dx -= image->width + 8;
35606 }
35607 @@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
35608 image->dy += image->height + 8;
35609 }
35610 } else if (rotate == FB_ROTATE_CCW) {
35611 - for (x = 0; x < num && image->dy >= 0; x++) {
35612 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
35613 info->fbops->fb_imageblit(info, image);
35614 image->dy -= image->height + 8;
35615 }
35616 @@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
35617 int flags = info->flags;
35618 int ret = 0;
35619
35620 + pax_track_stack();
35621 +
35622 if (var->activate & FB_ACTIVATE_INV_MODE) {
35623 struct fb_videomode mode1, mode2;
35624
35625 @@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
35626 void __user *argp = (void __user *)arg;
35627 long ret = 0;
35628
35629 + pax_track_stack();
35630 +
35631 switch (cmd) {
35632 case FBIOGET_VSCREENINFO:
35633 if (!lock_fb_info(info))
35634 @@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
35635 return -EFAULT;
35636 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
35637 return -EINVAL;
35638 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
35639 + if (con2fb.framebuffer >= FB_MAX)
35640 return -EINVAL;
35641 if (!registered_fb[con2fb.framebuffer])
35642 request_module("fb%d", con2fb.framebuffer);
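
The fbcmap and fbmem hunks above address one pitfall: fields such as cmap->start, con2fb.framebuffer, image->dx and image->dy are unsigned, so a plain "< 0" or ">= 0" test on them is always false or always true. The patch either drops the dead check or casts to a signed type (__s32) before comparing. A small stand-alone illustration, with an illustrative dx variable standing in for the unsigned struct field:

#include <stdio.h>

int main(void)
{
        unsigned int dx = 4;     /* stands in for the unsigned dx field of struct fb_image */

        dx -= 16;                /* wraps around instead of going negative */

        if (dx >= 0)             /* always true for an unsigned value: a dead check */
                printf("unsigned test: dx looks non-negative (dx = %u)\n", dx);

        if ((int)dx < 0)         /* the patch's approach: compare as signed */
                printf("signed test: dx is negative, the loop would stop here\n");

        return 0;
}

With extra warnings enabled (GCC's -Wtype-limits, part of -Wextra) the first test is flagged as always true, which is how such dead range checks are usually spotted.
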
35643 diff -urNp linux-2.6.32.42/drivers/video/i810/i810_accel.c linux-2.6.32.42/drivers/video/i810/i810_accel.c
35644 --- linux-2.6.32.42/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
35645 +++ linux-2.6.32.42/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
35646 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
35647 }
35648 }
35649 printk("ringbuffer lockup!!!\n");
35650 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
35651 i810_report_error(mmio);
35652 par->dev_flags |= LOCKUP;
35653 info->pixmap.scan_align = 1;
35654 diff -urNp linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c
35655 --- linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
35656 +++ linux-2.6.32.42/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
35657 @@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
35658 return bd->props.brightness;
35659 }
35660
35661 -static struct backlight_ops nvidia_bl_ops = {
35662 +static const struct backlight_ops nvidia_bl_ops = {
35663 .get_brightness = nvidia_bl_get_brightness,
35664 .update_status = nvidia_bl_update_status,
35665 };
35666 diff -urNp linux-2.6.32.42/drivers/video/riva/fbdev.c linux-2.6.32.42/drivers/video/riva/fbdev.c
35667 --- linux-2.6.32.42/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
35668 +++ linux-2.6.32.42/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
35669 @@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
35670 return bd->props.brightness;
35671 }
35672
35673 -static struct backlight_ops riva_bl_ops = {
35674 +static const struct backlight_ops riva_bl_ops = {
35675 .get_brightness = riva_bl_get_brightness,
35676 .update_status = riva_bl_update_status,
35677 };
35678 diff -urNp linux-2.6.32.42/drivers/video/uvesafb.c linux-2.6.32.42/drivers/video/uvesafb.c
35679 --- linux-2.6.32.42/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
35680 +++ linux-2.6.32.42/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
35681 @@ -18,6 +18,7 @@
35682 #include <linux/fb.h>
35683 #include <linux/io.h>
35684 #include <linux/mutex.h>
35685 +#include <linux/moduleloader.h>
35686 #include <video/edid.h>
35687 #include <video/uvesafb.h>
35688 #ifdef CONFIG_X86
35689 @@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
35690 NULL,
35691 };
35692
35693 - return call_usermodehelper(v86d_path, argv, envp, 1);
35694 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
35695 }
35696
35697 /*
35698 @@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
35699 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
35700 par->pmi_setpal = par->ypan = 0;
35701 } else {
35702 +
35703 +#ifdef CONFIG_PAX_KERNEXEC
35704 +#ifdef CONFIG_MODULES
35705 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
35706 +#endif
35707 + if (!par->pmi_code) {
35708 + par->pmi_setpal = par->ypan = 0;
35709 + return 0;
35710 + }
35711 +#endif
35712 +
35713 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
35714 + task->t.regs.edi);
35715 +
35716 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35717 + pax_open_kernel();
35718 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
35719 + pax_close_kernel();
35720 +
35721 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
35722 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
35723 +#else
35724 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
35725 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
35726 +#endif
35727 +
35728 printk(KERN_INFO "uvesafb: protected mode interface info at "
35729 "%04x:%04x\n",
35730 (u16)task->t.regs.es, (u16)task->t.regs.edi);
35731 @@ -1799,6 +1822,11 @@ out:
35732 if (par->vbe_modes)
35733 kfree(par->vbe_modes);
35734
35735 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35736 + if (par->pmi_code)
35737 + module_free_exec(NULL, par->pmi_code);
35738 +#endif
35739 +
35740 framebuffer_release(info);
35741 return err;
35742 }
35743 @@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
35744 kfree(par->vbe_state_orig);
35745 if (par->vbe_state_saved)
35746 kfree(par->vbe_state_saved);
35747 +
35748 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35749 + if (par->pmi_code)
35750 + module_free_exec(NULL, par->pmi_code);
35751 +#endif
35752 +
35753 }
35754
35755 framebuffer_release(info);
35756 diff -urNp linux-2.6.32.42/drivers/video/vesafb.c linux-2.6.32.42/drivers/video/vesafb.c
35757 --- linux-2.6.32.42/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
35758 +++ linux-2.6.32.42/drivers/video/vesafb.c 2011-04-17 15:56:46.000000000 -0400
35759 @@ -9,6 +9,7 @@
35760 */
35761
35762 #include <linux/module.h>
35763 +#include <linux/moduleloader.h>
35764 #include <linux/kernel.h>
35765 #include <linux/errno.h>
35766 #include <linux/string.h>
35767 @@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
35768 static int vram_total __initdata; /* Set total amount of memory */
35769 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
35770 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
35771 -static void (*pmi_start)(void) __read_mostly;
35772 -static void (*pmi_pal) (void) __read_mostly;
35773 +static void (*pmi_start)(void) __read_only;
35774 +static void (*pmi_pal) (void) __read_only;
35775 static int depth __read_mostly;
35776 static int vga_compat __read_mostly;
35777 /* --------------------------------------------------------------------- */
35778 @@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
35779 unsigned int size_vmode;
35780 unsigned int size_remap;
35781 unsigned int size_total;
35782 + void *pmi_code = NULL;
35783
35784 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
35785 return -ENODEV;
35786 @@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
35787 size_remap = size_total;
35788 vesafb_fix.smem_len = size_remap;
35789
35790 -#ifndef __i386__
35791 - screen_info.vesapm_seg = 0;
35792 -#endif
35793 -
35794 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
35795 printk(KERN_WARNING
35796 "vesafb: cannot reserve video memory at 0x%lx\n",
35797 @@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
35798 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
35799 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
35800
35801 +#ifdef __i386__
35802 +
35803 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35804 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
35805 + if (!pmi_code)
35806 +#elif !defined(CONFIG_PAX_KERNEXEC)
35807 + if (0)
35808 +#endif
35809 +
35810 +#endif
35811 + screen_info.vesapm_seg = 0;
35812 +
35813 if (screen_info.vesapm_seg) {
35814 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
35815 - screen_info.vesapm_seg,screen_info.vesapm_off);
35816 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
35817 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
35818 }
35819
35820 if (screen_info.vesapm_seg < 0xc000)
35821 @@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
35822
35823 if (ypan || pmi_setpal) {
35824 unsigned short *pmi_base;
35825 - pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
35826 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
35827 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
35828 +
35829 + pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
35830 +
35831 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35832 + pax_open_kernel();
35833 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
35834 +#else
35835 + pmi_code = pmi_base;
35836 +#endif
35837 +
35838 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
35839 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
35840 +
35841 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35842 + pmi_start = ktva_ktla(pmi_start);
35843 + pmi_pal = ktva_ktla(pmi_pal);
35844 + pax_close_kernel();
35845 +#endif
35846 +
35847 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
35848 if (pmi_base[3]) {
35849 printk(KERN_INFO "vesafb: pmi: ports = ");
35850 @@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
35851 info->node, info->fix.id);
35852 return 0;
35853 err:
35854 +
35855 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
35856 + module_free_exec(NULL, pmi_code);
35857 +#endif
35858 +
35859 if (info->screen_base)
35860 iounmap(info->screen_base);
35861 framebuffer_release(info);
35862 diff -urNp linux-2.6.32.42/drivers/xen/sys-hypervisor.c linux-2.6.32.42/drivers/xen/sys-hypervisor.c
35863 --- linux-2.6.32.42/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
35864 +++ linux-2.6.32.42/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
35865 @@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
35866 return 0;
35867 }
35868
35869 -static struct sysfs_ops hyp_sysfs_ops = {
35870 +static const struct sysfs_ops hyp_sysfs_ops = {
35871 .show = hyp_sysfs_show,
35872 .store = hyp_sysfs_store,
35873 };
35874 diff -urNp linux-2.6.32.42/fs/9p/vfs_inode.c linux-2.6.32.42/fs/9p/vfs_inode.c
35875 --- linux-2.6.32.42/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
35876 +++ linux-2.6.32.42/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
35877 @@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
35878 static void
35879 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
35880 {
35881 - char *s = nd_get_link(nd);
35882 + const char *s = nd_get_link(nd);
35883
35884 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
35885 IS_ERR(s) ? "<error>" : s);
35886 diff -urNp linux-2.6.32.42/fs/aio.c linux-2.6.32.42/fs/aio.c
35887 --- linux-2.6.32.42/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
35888 +++ linux-2.6.32.42/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
35889 @@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
35890 size += sizeof(struct io_event) * nr_events;
35891 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
35892
35893 - if (nr_pages < 0)
35894 + if (nr_pages <= 0)
35895 return -EINVAL;
35896
35897 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
35898 @@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
35899 struct aio_timeout to;
35900 int retry = 0;
35901
35902 + pax_track_stack();
35903 +
35904 /* needed to zero any padding within an entry (there shouldn't be
35905 * any, but C is fun!
35906 */
35907 @@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
35908 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
35909 {
35910 ssize_t ret;
35911 + struct iovec iovstack;
35912
35913 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
35914 kiocb->ki_nbytes, 1,
35915 - &kiocb->ki_inline_vec, &kiocb->ki_iovec);
35916 + &iovstack, &kiocb->ki_iovec);
35917 if (ret < 0)
35918 goto out;
35919
35920 + if (kiocb->ki_iovec == &iovstack) {
35921 + kiocb->ki_inline_vec = iovstack;
35922 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
35923 + }
35924 kiocb->ki_nr_segs = kiocb->ki_nbytes;
35925 kiocb->ki_cur_seg = 0;
35926 /* ki_nbytes/left now reflect bytes instead of segs */
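
The aio hunk above guards against rw_copy_check_uvector() leaving ki_iovec pointed at a stack-local iovec: when that happens, the stack copy is first moved into the kiocb's own ki_inline_vec and the pointer is re-aimed at the owned storage. A user-space sketch of the same ownership rule, assuming hypothetical request/inline_vec/vec names and a simplified helper:

#include <stdio.h>
#include <stdlib.h>

struct iovec_like {
        void  *base;
        size_t len;
};

struct request {
        struct iovec_like  inline_vec;   /* owned storage, like ki_inline_vec */
        struct iovec_like *vec;          /* kept pointer, like ki_iovec */
};

/* Uses the caller's stack slot for a single segment, allocates otherwise. */
static struct iovec_like *check_uvector(size_t nr, struct iovec_like *stack_vec)
{
        if (nr == 1)
                return stack_vec;
        return calloc(nr, sizeof(*stack_vec));
}

static int setup(struct request *req, size_t nr)
{
        struct iovec_like stack_vec = { 0 };   /* dies when setup() returns */

        req->vec = check_uvector(nr, &stack_vec);
        if (!req->vec)
                return -1;

        if (req->vec == &stack_vec) {    /* the fix: copy, then re-point */
                req->inline_vec = stack_vec;
                req->vec = &req->inline_vec;
        }
        return 0;
}

int main(void)
{
        struct request req;

        if (setup(&req, 1) == 0)
                printf("segment kept in owned storage at %p\n", (void *)req.vec);
        return 0;
}

The essential step is the copy-then-repoint: once the structure owns the data, the caller's stack slot can safely go out of scope.
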
35927 diff -urNp linux-2.6.32.42/fs/attr.c linux-2.6.32.42/fs/attr.c
35928 --- linux-2.6.32.42/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
35929 +++ linux-2.6.32.42/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
35930 @@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
35931 unsigned long limit;
35932
35933 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
35934 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
35935 if (limit != RLIM_INFINITY && offset > limit)
35936 goto out_sig;
35937 if (offset > inode->i_sb->s_maxbytes)
35938 diff -urNp linux-2.6.32.42/fs/autofs/root.c linux-2.6.32.42/fs/autofs/root.c
35939 --- linux-2.6.32.42/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
35940 +++ linux-2.6.32.42/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
35941 @@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
35942 set_bit(n,sbi->symlink_bitmap);
35943 sl = &sbi->symlink[n];
35944 sl->len = strlen(symname);
35945 - sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
35946 + slsize = sl->len+1;
35947 + sl->data = kmalloc(slsize, GFP_KERNEL);
35948 if (!sl->data) {
35949 clear_bit(n,sbi->symlink_bitmap);
35950 unlock_kernel();
35951 diff -urNp linux-2.6.32.42/fs/autofs4/symlink.c linux-2.6.32.42/fs/autofs4/symlink.c
35952 --- linux-2.6.32.42/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
35953 +++ linux-2.6.32.42/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
35954 @@ -15,7 +15,7 @@
35955 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
35956 {
35957 struct autofs_info *ino = autofs4_dentry_ino(dentry);
35958 - nd_set_link(nd, (char *)ino->u.symlink);
35959 + nd_set_link(nd, ino->u.symlink);
35960 return NULL;
35961 }
35962
35963 diff -urNp linux-2.6.32.42/fs/befs/linuxvfs.c linux-2.6.32.42/fs/befs/linuxvfs.c
35964 --- linux-2.6.32.42/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
35965 +++ linux-2.6.32.42/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
35966 @@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
35967 {
35968 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
35969 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
35970 - char *link = nd_get_link(nd);
35971 + const char *link = nd_get_link(nd);
35972 if (!IS_ERR(link))
35973 kfree(link);
35974 }
35975 diff -urNp linux-2.6.32.42/fs/binfmt_aout.c linux-2.6.32.42/fs/binfmt_aout.c
35976 --- linux-2.6.32.42/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
35977 +++ linux-2.6.32.42/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
35978 @@ -16,6 +16,7 @@
35979 #include <linux/string.h>
35980 #include <linux/fs.h>
35981 #include <linux/file.h>
35982 +#include <linux/security.h>
35983 #include <linux/stat.h>
35984 #include <linux/fcntl.h>
35985 #include <linux/ptrace.h>
35986 @@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
35987 #endif
35988 # define START_STACK(u) (u.start_stack)
35989
35990 + memset(&dump, 0, sizeof(dump));
35991 +
35992 fs = get_fs();
35993 set_fs(KERNEL_DS);
35994 has_dumped = 1;
35995 @@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
35996
35997 /* If the size of the dump file exceeds the rlimit, then see what would happen
35998 if we wrote the stack, but not the data area. */
35999 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
36000 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
36001 dump.u_dsize = 0;
36002
36003 /* Make sure we have enough room to write the stack and data areas. */
36004 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
36005 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
36006 dump.u_ssize = 0;
36007
36008 @@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
36009 dump_size = dump.u_ssize << PAGE_SHIFT;
36010 DUMP_WRITE(dump_start,dump_size);
36011 }
36012 -/* Finally dump the task struct. Not be used by gdb, but could be useful */
36013 - set_fs(KERNEL_DS);
36014 - DUMP_WRITE(current,sizeof(*current));
36015 +/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
36016 end_coredump:
36017 set_fs(fs);
36018 return has_dumped;
36019 @@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
36020 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
36021 if (rlim >= RLIM_INFINITY)
36022 rlim = ~0;
36023 +
36024 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
36025 if (ex.a_data + ex.a_bss > rlim)
36026 return -ENOMEM;
36027
36028 @@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
36029 install_exec_creds(bprm);
36030 current->flags &= ~PF_FORKNOEXEC;
36031
36032 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36033 + current->mm->pax_flags = 0UL;
36034 +#endif
36035 +
36036 +#ifdef CONFIG_PAX_PAGEEXEC
36037 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
36038 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
36039 +
36040 +#ifdef CONFIG_PAX_EMUTRAMP
36041 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
36042 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
36043 +#endif
36044 +
36045 +#ifdef CONFIG_PAX_MPROTECT
36046 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
36047 + current->mm->pax_flags |= MF_PAX_MPROTECT;
36048 +#endif
36049 +
36050 + }
36051 +#endif
36052 +
36053 if (N_MAGIC(ex) == OMAGIC) {
36054 unsigned long text_addr, map_size;
36055 loff_t pos;
36056 @@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
36057
36058 down_write(&current->mm->mmap_sem);
36059 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
36060 - PROT_READ | PROT_WRITE | PROT_EXEC,
36061 + PROT_READ | PROT_WRITE,
36062 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
36063 fd_offset + ex.a_text);
36064 up_write(&current->mm->mmap_sem);
36065 diff -urNp linux-2.6.32.42/fs/binfmt_elf.c linux-2.6.32.42/fs/binfmt_elf.c
36066 --- linux-2.6.32.42/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
36067 +++ linux-2.6.32.42/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
36068 @@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
36069 #define elf_core_dump NULL
36070 #endif
36071
36072 +#ifdef CONFIG_PAX_MPROTECT
36073 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
36074 +#endif
36075 +
36076 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
36077 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
36078 #else
36079 @@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
36080 .load_binary = load_elf_binary,
36081 .load_shlib = load_elf_library,
36082 .core_dump = elf_core_dump,
36083 +
36084 +#ifdef CONFIG_PAX_MPROTECT
36085 + .handle_mprotect= elf_handle_mprotect,
36086 +#endif
36087 +
36088 .min_coredump = ELF_EXEC_PAGESIZE,
36089 .hasvdso = 1
36090 };
36091 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
36092
36093 static int set_brk(unsigned long start, unsigned long end)
36094 {
36095 + unsigned long e = end;
36096 +
36097 start = ELF_PAGEALIGN(start);
36098 end = ELF_PAGEALIGN(end);
36099 if (end > start) {
36100 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
36101 if (BAD_ADDR(addr))
36102 return addr;
36103 }
36104 - current->mm->start_brk = current->mm->brk = end;
36105 + current->mm->start_brk = current->mm->brk = e;
36106 return 0;
36107 }
36108
36109 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
36110 elf_addr_t __user *u_rand_bytes;
36111 const char *k_platform = ELF_PLATFORM;
36112 const char *k_base_platform = ELF_BASE_PLATFORM;
36113 - unsigned char k_rand_bytes[16];
36114 + u32 k_rand_bytes[4];
36115 int items;
36116 elf_addr_t *elf_info;
36117 int ei_index = 0;
36118 const struct cred *cred = current_cred();
36119 struct vm_area_struct *vma;
36120 + unsigned long saved_auxv[AT_VECTOR_SIZE];
36121 +
36122 + pax_track_stack();
36123
36124 /*
36125 * In some cases (e.g. Hyper-Threading), we want to avoid L1
36126 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
36127 * Generate 16 random bytes for userspace PRNG seeding.
36128 */
36129 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
36130 - u_rand_bytes = (elf_addr_t __user *)
36131 - STACK_ALLOC(p, sizeof(k_rand_bytes));
36132 + srandom32(k_rand_bytes[0] ^ random32());
36133 + srandom32(k_rand_bytes[1] ^ random32());
36134 + srandom32(k_rand_bytes[2] ^ random32());
36135 + srandom32(k_rand_bytes[3] ^ random32());
36136 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
36137 + u_rand_bytes = (elf_addr_t __user *) p;
36138 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
36139 return -EFAULT;
36140
36141 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
36142 return -EFAULT;
36143 current->mm->env_end = p;
36144
36145 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
36146 +
36147 /* Put the elf_info on the stack in the right place. */
36148 sp = (elf_addr_t __user *)envp + 1;
36149 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
36150 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
36151 return -EFAULT;
36152 return 0;
36153 }
36154 @@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
36155 {
36156 struct elf_phdr *elf_phdata;
36157 struct elf_phdr *eppnt;
36158 - unsigned long load_addr = 0;
36159 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
36160 int load_addr_set = 0;
36161 unsigned long last_bss = 0, elf_bss = 0;
36162 - unsigned long error = ~0UL;
36163 + unsigned long error = -EINVAL;
36164 unsigned long total_size;
36165 int retval, i, size;
36166
36167 @@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
36168 goto out_close;
36169 }
36170
36171 +#ifdef CONFIG_PAX_SEGMEXEC
36172 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
36173 + pax_task_size = SEGMEXEC_TASK_SIZE;
36174 +#endif
36175 +
36176 eppnt = elf_phdata;
36177 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
36178 if (eppnt->p_type == PT_LOAD) {
36179 @@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
36180 k = load_addr + eppnt->p_vaddr;
36181 if (BAD_ADDR(k) ||
36182 eppnt->p_filesz > eppnt->p_memsz ||
36183 - eppnt->p_memsz > TASK_SIZE ||
36184 - TASK_SIZE - eppnt->p_memsz < k) {
36185 + eppnt->p_memsz > pax_task_size ||
36186 + pax_task_size - eppnt->p_memsz < k) {
36187 error = -ENOMEM;
36188 goto out_close;
36189 }
36190 @@ -532,6 +557,194 @@ out:
36191 return error;
36192 }
36193
36194 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
36195 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
36196 +{
36197 + unsigned long pax_flags = 0UL;
36198 +
36199 +#ifdef CONFIG_PAX_PAGEEXEC
36200 + if (elf_phdata->p_flags & PF_PAGEEXEC)
36201 + pax_flags |= MF_PAX_PAGEEXEC;
36202 +#endif
36203 +
36204 +#ifdef CONFIG_PAX_SEGMEXEC
36205 + if (elf_phdata->p_flags & PF_SEGMEXEC)
36206 + pax_flags |= MF_PAX_SEGMEXEC;
36207 +#endif
36208 +
36209 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36210 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36211 + if (nx_enabled)
36212 + pax_flags &= ~MF_PAX_SEGMEXEC;
36213 + else
36214 + pax_flags &= ~MF_PAX_PAGEEXEC;
36215 + }
36216 +#endif
36217 +
36218 +#ifdef CONFIG_PAX_EMUTRAMP
36219 + if (elf_phdata->p_flags & PF_EMUTRAMP)
36220 + pax_flags |= MF_PAX_EMUTRAMP;
36221 +#endif
36222 +
36223 +#ifdef CONFIG_PAX_MPROTECT
36224 + if (elf_phdata->p_flags & PF_MPROTECT)
36225 + pax_flags |= MF_PAX_MPROTECT;
36226 +#endif
36227 +
36228 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36229 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
36230 + pax_flags |= MF_PAX_RANDMMAP;
36231 +#endif
36232 +
36233 + return pax_flags;
36234 +}
36235 +#endif
36236 +
36237 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36238 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
36239 +{
36240 + unsigned long pax_flags = 0UL;
36241 +
36242 +#ifdef CONFIG_PAX_PAGEEXEC
36243 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
36244 + pax_flags |= MF_PAX_PAGEEXEC;
36245 +#endif
36246 +
36247 +#ifdef CONFIG_PAX_SEGMEXEC
36248 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
36249 + pax_flags |= MF_PAX_SEGMEXEC;
36250 +#endif
36251 +
36252 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36253 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36254 + if (nx_enabled)
36255 + pax_flags &= ~MF_PAX_SEGMEXEC;
36256 + else
36257 + pax_flags &= ~MF_PAX_PAGEEXEC;
36258 + }
36259 +#endif
36260 +
36261 +#ifdef CONFIG_PAX_EMUTRAMP
36262 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
36263 + pax_flags |= MF_PAX_EMUTRAMP;
36264 +#endif
36265 +
36266 +#ifdef CONFIG_PAX_MPROTECT
36267 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
36268 + pax_flags |= MF_PAX_MPROTECT;
36269 +#endif
36270 +
36271 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36272 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
36273 + pax_flags |= MF_PAX_RANDMMAP;
36274 +#endif
36275 +
36276 + return pax_flags;
36277 +}
36278 +#endif
36279 +
36280 +#ifdef CONFIG_PAX_EI_PAX
36281 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
36282 +{
36283 + unsigned long pax_flags = 0UL;
36284 +
36285 +#ifdef CONFIG_PAX_PAGEEXEC
36286 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
36287 + pax_flags |= MF_PAX_PAGEEXEC;
36288 +#endif
36289 +
36290 +#ifdef CONFIG_PAX_SEGMEXEC
36291 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
36292 + pax_flags |= MF_PAX_SEGMEXEC;
36293 +#endif
36294 +
36295 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36296 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36297 + if (nx_enabled)
36298 + pax_flags &= ~MF_PAX_SEGMEXEC;
36299 + else
36300 + pax_flags &= ~MF_PAX_PAGEEXEC;
36301 + }
36302 +#endif
36303 +
36304 +#ifdef CONFIG_PAX_EMUTRAMP
36305 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
36306 + pax_flags |= MF_PAX_EMUTRAMP;
36307 +#endif
36308 +
36309 +#ifdef CONFIG_PAX_MPROTECT
36310 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
36311 + pax_flags |= MF_PAX_MPROTECT;
36312 +#endif
36313 +
36314 +#ifdef CONFIG_PAX_ASLR
36315 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
36316 + pax_flags |= MF_PAX_RANDMMAP;
36317 +#endif
36318 +
36319 + return pax_flags;
36320 +}
36321 +#endif
36322 +
36323 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36324 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
36325 +{
36326 + unsigned long pax_flags = 0UL;
36327 +
36328 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36329 + unsigned long i;
36330 + int found_flags = 0;
36331 +#endif
36332 +
36333 +#ifdef CONFIG_PAX_EI_PAX
36334 + pax_flags = pax_parse_ei_pax(elf_ex);
36335 +#endif
36336 +
36337 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
36338 + for (i = 0UL; i < elf_ex->e_phnum; i++)
36339 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
36340 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
36341 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
36342 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
36343 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
36344 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
36345 + return -EINVAL;
36346 +
36347 +#ifdef CONFIG_PAX_SOFTMODE
36348 + if (pax_softmode)
36349 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
36350 + else
36351 +#endif
36352 +
36353 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
36354 + found_flags = 1;
36355 + break;
36356 + }
36357 +#endif
36358 +
36359 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
36360 + if (found_flags == 0) {
36361 + struct elf_phdr phdr;
36362 + memset(&phdr, 0, sizeof(phdr));
36363 + phdr.p_flags = PF_NOEMUTRAMP;
36364 +#ifdef CONFIG_PAX_SOFTMODE
36365 + if (pax_softmode)
36366 + pax_flags = pax_parse_softmode(&phdr);
36367 + else
36368 +#endif
36369 + pax_flags = pax_parse_hardmode(&phdr);
36370 + }
36371 +#endif
36372 +
36373 +
36374 + if (0 > pax_check_flags(&pax_flags))
36375 + return -EINVAL;
36376 +
36377 + current->mm->pax_flags = pax_flags;
36378 + return 0;
36379 +}
36380 +#endif
36381 +
36382 /*
36383 * These are the functions used to load ELF style executables and shared
36384 * libraries. There is no binary dependent code anywhere else.
36385 @@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
36386 {
36387 unsigned int random_variable = 0;
36388
36389 +#ifdef CONFIG_PAX_RANDUSTACK
36390 + if (randomize_va_space)
36391 + return stack_top - current->mm->delta_stack;
36392 +#endif
36393 +
36394 if ((current->flags & PF_RANDOMIZE) &&
36395 !(current->personality & ADDR_NO_RANDOMIZE)) {
36396 random_variable = get_random_int() & STACK_RND_MASK;
36397 @@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
36398 unsigned long load_addr = 0, load_bias = 0;
36399 int load_addr_set = 0;
36400 char * elf_interpreter = NULL;
36401 - unsigned long error;
36402 + unsigned long error = 0;
36403 struct elf_phdr *elf_ppnt, *elf_phdata;
36404 unsigned long elf_bss, elf_brk;
36405 int retval, i;
36406 @@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
36407 unsigned long start_code, end_code, start_data, end_data;
36408 unsigned long reloc_func_desc = 0;
36409 int executable_stack = EXSTACK_DEFAULT;
36410 - unsigned long def_flags = 0;
36411 struct {
36412 struct elfhdr elf_ex;
36413 struct elfhdr interp_elf_ex;
36414 } *loc;
36415 + unsigned long pax_task_size = TASK_SIZE;
36416
36417 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
36418 if (!loc) {
36419 @@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
36420
36421 /* OK, This is the point of no return */
36422 current->flags &= ~PF_FORKNOEXEC;
36423 - current->mm->def_flags = def_flags;
36424 +
36425 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36426 + current->mm->pax_flags = 0UL;
36427 +#endif
36428 +
36429 +#ifdef CONFIG_PAX_DLRESOLVE
36430 + current->mm->call_dl_resolve = 0UL;
36431 +#endif
36432 +
36433 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
36434 + current->mm->call_syscall = 0UL;
36435 +#endif
36436 +
36437 +#ifdef CONFIG_PAX_ASLR
36438 + current->mm->delta_mmap = 0UL;
36439 + current->mm->delta_stack = 0UL;
36440 +#endif
36441 +
36442 + current->mm->def_flags = 0;
36443 +
36444 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
36445 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
36446 + send_sig(SIGKILL, current, 0);
36447 + goto out_free_dentry;
36448 + }
36449 +#endif
36450 +
36451 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
36452 + pax_set_initial_flags(bprm);
36453 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
36454 + if (pax_set_initial_flags_func)
36455 + (pax_set_initial_flags_func)(bprm);
36456 +#endif
36457 +
36458 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
36459 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
36460 + current->mm->context.user_cs_limit = PAGE_SIZE;
36461 + current->mm->def_flags |= VM_PAGEEXEC;
36462 + }
36463 +#endif
36464 +
36465 +#ifdef CONFIG_PAX_SEGMEXEC
36466 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
36467 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
36468 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
36469 + pax_task_size = SEGMEXEC_TASK_SIZE;
36470 + }
36471 +#endif
36472 +
36473 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
36474 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36475 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
36476 + put_cpu();
36477 + }
36478 +#endif
36479
36480 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
36481 may depend on the personality. */
36482 SET_PERSONALITY(loc->elf_ex);
36483 +
36484 +#ifdef CONFIG_PAX_ASLR
36485 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
36486 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
36487 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
36488 + }
36489 +#endif
36490 +
36491 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
36492 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36493 + executable_stack = EXSTACK_DISABLE_X;
36494 + current->personality &= ~READ_IMPLIES_EXEC;
36495 + } else
36496 +#endif
36497 +
36498 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
36499 current->personality |= READ_IMPLIES_EXEC;
36500
36501 @@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
36502 #else
36503 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
36504 #endif
36505 +
36506 +#ifdef CONFIG_PAX_RANDMMAP
36507 + /* PaX: randomize base address at the default exe base if requested */
36508 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
36509 +#ifdef CONFIG_SPARC64
36510 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
36511 +#else
36512 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
36513 +#endif
36514 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
36515 + elf_flags |= MAP_FIXED;
36516 + }
36517 +#endif
36518 +
36519 }
36520
36521 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
36522 @@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
36523 * allowed task size. Note that p_filesz must always be
36524 * <= p_memsz so it is only necessary to check p_memsz.
36525 */
36526 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
36527 - elf_ppnt->p_memsz > TASK_SIZE ||
36528 - TASK_SIZE - elf_ppnt->p_memsz < k) {
36529 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
36530 + elf_ppnt->p_memsz > pax_task_size ||
36531 + pax_task_size - elf_ppnt->p_memsz < k) {
36532 /* set_brk can never work. Avoid overflows. */
36533 send_sig(SIGKILL, current, 0);
36534 retval = -EINVAL;
36535 @@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
36536 start_data += load_bias;
36537 end_data += load_bias;
36538
36539 +#ifdef CONFIG_PAX_RANDMMAP
36540 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
36541 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
36542 +#endif
36543 +
36544 /* Calling set_brk effectively mmaps the pages that we need
36545 * for the bss and break sections. We must do this before
36546 * mapping in the interpreter, to make sure it doesn't wind
36547 @@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
36548 goto out_free_dentry;
36549 }
36550 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
36551 - send_sig(SIGSEGV, current, 0);
36552 - retval = -EFAULT; /* Nobody gets to see this, but.. */
36553 - goto out_free_dentry;
36554 + /*
36555 + * This bss-zeroing can fail if the ELF
36556 + * file specifies odd protections. So
36557 + * we don't check the return value
36558 + */
36559 }
36560
36561 if (elf_interpreter) {
36562 @@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
36563 unsigned long n = off;
36564 if (n > PAGE_SIZE)
36565 n = PAGE_SIZE;
36566 - if (!dump_write(file, buf, n))
36567 + if (!dump_write(file, buf, n)) {
36568 + free_page((unsigned long)buf);
36569 return 0;
36570 + }
36571 off -= n;
36572 }
36573 free_page((unsigned long)buf);
36574 @@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
36575 * Decide what to dump of a segment, part, all or none.
36576 */
36577 static unsigned long vma_dump_size(struct vm_area_struct *vma,
36578 - unsigned long mm_flags)
36579 + unsigned long mm_flags, long signr)
36580 {
36581 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
36582
36583 @@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
36584 if (vma->vm_file == NULL)
36585 return 0;
36586
36587 - if (FILTER(MAPPED_PRIVATE))
36588 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
36589 goto whole;
36590
36591 /*
36592 @@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
36593 #undef DUMP_WRITE
36594
36595 #define DUMP_WRITE(addr, nr) \
36596 + do { \
36597 + gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
36598 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
36599 - goto end_coredump;
36600 + goto end_coredump; \
36601 + } while (0);
36602
36603 static void fill_elf_header(struct elfhdr *elf, int segs,
36604 u16 machine, u32 flags, u8 osabi)
36605 @@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
36606 {
36607 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
36608 int i = 0;
36609 - do
36610 + do {
36611 i += 2;
36612 - while (auxv[i - 2] != AT_NULL);
36613 + } while (auxv[i - 2] != AT_NULL);
36614 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
36615 }
36616
36617 @@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
36618 phdr.p_offset = offset;
36619 phdr.p_vaddr = vma->vm_start;
36620 phdr.p_paddr = 0;
36621 - phdr.p_filesz = vma_dump_size(vma, mm_flags);
36622 + phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
36623 phdr.p_memsz = vma->vm_end - vma->vm_start;
36624 offset += phdr.p_filesz;
36625 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
36626 @@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
36627 unsigned long addr;
36628 unsigned long end;
36629
36630 - end = vma->vm_start + vma_dump_size(vma, mm_flags);
36631 + end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
36632
36633 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
36634 struct page *page;
36635 @@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
36636 page = get_dump_page(addr);
36637 if (page) {
36638 void *kaddr = kmap(page);
36639 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
36640 stop = ((size += PAGE_SIZE) > limit) ||
36641 !dump_write(file, kaddr, PAGE_SIZE);
36642 kunmap(page);
36643 @@ -2042,6 +2356,97 @@ out:
36644
36645 #endif /* USE_ELF_CORE_DUMP */
36646
36647 +#ifdef CONFIG_PAX_MPROTECT
36648 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
36649 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
36650 + * we'll remove VM_MAYWRITE for good on RELRO segments.
36651 + *
36652 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
36653 + * basis because we want to allow the common case and not the special ones.
36654 + */
36655 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
36656 +{
36657 + struct elfhdr elf_h;
36658 + struct elf_phdr elf_p;
36659 + unsigned long i;
36660 + unsigned long oldflags;
36661 + bool is_textrel_rw, is_textrel_rx, is_relro;
36662 +
36663 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
36664 + return;
36665 +
36666 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
36667 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
36668 +
36669 +#ifdef CONFIG_PAX_ELFRELOCS
36670 + /* possible TEXTREL */
36671 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
36672 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
36673 +#else
36674 + is_textrel_rw = false;
36675 + is_textrel_rx = false;
36676 +#endif
36677 +
36678 + /* possible RELRO */
36679 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
36680 +
36681 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
36682 + return;
36683 +
36684 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
36685 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
36686 +
36687 +#ifdef CONFIG_PAX_ETEXECRELOCS
36688 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
36689 +#else
36690 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
36691 +#endif
36692 +
36693 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
36694 + !elf_check_arch(&elf_h) ||
36695 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
36696 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
36697 + return;
36698 +
36699 + for (i = 0UL; i < elf_h.e_phnum; i++) {
36700 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
36701 + return;
36702 + switch (elf_p.p_type) {
36703 + case PT_DYNAMIC:
36704 + if (!is_textrel_rw && !is_textrel_rx)
36705 + continue;
36706 + i = 0UL;
36707 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
36708 + elf_dyn dyn;
36709 +
36710 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
36711 + return;
36712 + if (dyn.d_tag == DT_NULL)
36713 + return;
36714 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
36715 + gr_log_textrel(vma);
36716 + if (is_textrel_rw)
36717 + vma->vm_flags |= VM_MAYWRITE;
36718 + else
36719 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
36720 + vma->vm_flags &= ~VM_MAYWRITE;
36721 + return;
36722 + }
36723 + i++;
36724 + }
36725 + return;
36726 +
36727 + case PT_GNU_RELRO:
36728 + if (!is_relro)
36729 + continue;
36730 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
36731 + vma->vm_flags &= ~VM_MAYWRITE;
36732 + return;
36733 + }
36734 + }
36735 +}
36736 +#endif
36737 +
36738 static int __init init_elf_binfmt(void)
36739 {
36740 return register_binfmt(&elf_format);
36741 diff -urNp linux-2.6.32.42/fs/binfmt_flat.c linux-2.6.32.42/fs/binfmt_flat.c
36742 --- linux-2.6.32.42/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
36743 +++ linux-2.6.32.42/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
36744 @@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
36745 realdatastart = (unsigned long) -ENOMEM;
36746 printk("Unable to allocate RAM for process data, errno %d\n",
36747 (int)-realdatastart);
36748 + down_write(&current->mm->mmap_sem);
36749 do_munmap(current->mm, textpos, text_len);
36750 + up_write(&current->mm->mmap_sem);
36751 ret = realdatastart;
36752 goto err;
36753 }
36754 @@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
36755 }
36756 if (IS_ERR_VALUE(result)) {
36757 printk("Unable to read data+bss, errno %d\n", (int)-result);
36758 + down_write(&current->mm->mmap_sem);
36759 do_munmap(current->mm, textpos, text_len);
36760 do_munmap(current->mm, realdatastart, data_len + extra);
36761 + up_write(&current->mm->mmap_sem);
36762 ret = result;
36763 goto err;
36764 }
36765 @@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
36766 }
36767 if (IS_ERR_VALUE(result)) {
36768 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
36769 + down_write(&current->mm->mmap_sem);
36770 do_munmap(current->mm, textpos, text_len + data_len + extra +
36771 MAX_SHARED_LIBS * sizeof(unsigned long));
36772 + up_write(&current->mm->mmap_sem);
36773 ret = result;
36774 goto err;
36775 }
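
The binfmt_flat error paths above wrap do_munmap() in down_write()/up_write() on mmap_sem, since unmapping mutates the mm and must be done while holding the writer side of that semaphore. A minimal user-space analogue of the locking rule, using a POSIX rwlock in place of the kernel's rw_semaphore; all names are illustrative.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static int mapped_regions = 1;

static void unmap_region(void)
{
        mapped_regions--;                    /* mutation that needs exclusive access */
}

int main(void)
{
        pthread_rwlock_wrlock(&map_lock);    /* down_write(&current->mm->mmap_sem); */
        unmap_region();                      /* do_munmap(...) */
        pthread_rwlock_unlock(&map_lock);    /* up_write(&current->mm->mmap_sem);  */

        printf("regions left: %d\n", mapped_regions);
        return 0;
}

(Build with -lpthread.)
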
36776 diff -urNp linux-2.6.32.42/fs/bio.c linux-2.6.32.42/fs/bio.c
36777 --- linux-2.6.32.42/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
36778 +++ linux-2.6.32.42/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
36779 @@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
36780
36781 i = 0;
36782 while (i < bio_slab_nr) {
36783 - struct bio_slab *bslab = &bio_slabs[i];
36784 + bslab = &bio_slabs[i];
36785
36786 if (!bslab->slab && entry == -1)
36787 entry = i;
36788 @@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
36789 const int read = bio_data_dir(bio) == READ;
36790 struct bio_map_data *bmd = bio->bi_private;
36791 int i;
36792 - char *p = bmd->sgvecs[0].iov_base;
36793 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
36794
36795 __bio_for_each_segment(bvec, bio, i, 0) {
36796 char *addr = page_address(bvec->bv_page);
36797 diff -urNp linux-2.6.32.42/fs/block_dev.c linux-2.6.32.42/fs/block_dev.c
36798 --- linux-2.6.32.42/fs/block_dev.c 2011-06-25 12:55:34.000000000 -0400
36799 +++ linux-2.6.32.42/fs/block_dev.c 2011-06-25 12:56:37.000000000 -0400
36800 @@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
36801 else if (bdev->bd_contains == bdev)
36802 res = 0; /* is a whole device which isn't held */
36803
36804 - else if (bdev->bd_contains->bd_holder == bd_claim)
36805 + else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
36806 res = 0; /* is a partition of a device that is being partitioned */
36807 else if (bdev->bd_contains->bd_holder != NULL)
36808 res = -EBUSY; /* is a partition of a held device */
36809 diff -urNp linux-2.6.32.42/fs/btrfs/ctree.c linux-2.6.32.42/fs/btrfs/ctree.c
36810 --- linux-2.6.32.42/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
36811 +++ linux-2.6.32.42/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
36812 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
36813 free_extent_buffer(buf);
36814 add_root_to_dirty_list(root);
36815 } else {
36816 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
36817 - parent_start = parent->start;
36818 - else
36819 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
36820 + if (parent)
36821 + parent_start = parent->start;
36822 + else
36823 + parent_start = 0;
36824 + } else
36825 parent_start = 0;
36826
36827 WARN_ON(trans->transid != btrfs_header_generation(parent));
36828 @@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
36829
36830 ret = 0;
36831 if (slot == 0) {
36832 - struct btrfs_disk_key disk_key;
36833 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
36834 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
36835 }
36836 diff -urNp linux-2.6.32.42/fs/btrfs/disk-io.c linux-2.6.32.42/fs/btrfs/disk-io.c
36837 --- linux-2.6.32.42/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
36838 +++ linux-2.6.32.42/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
36839 @@ -39,7 +39,7 @@
36840 #include "tree-log.h"
36841 #include "free-space-cache.h"
36842
36843 -static struct extent_io_ops btree_extent_io_ops;
36844 +static const struct extent_io_ops btree_extent_io_ops;
36845 static void end_workqueue_fn(struct btrfs_work *work);
36846 static void free_fs_root(struct btrfs_root *root);
36847
36848 @@ -2607,7 +2607,7 @@ out:
36849 return 0;
36850 }
36851
36852 -static struct extent_io_ops btree_extent_io_ops = {
36853 +static const struct extent_io_ops btree_extent_io_ops = {
36854 .write_cache_pages_lock_hook = btree_lock_page_hook,
36855 .readpage_end_io_hook = btree_readpage_end_io_hook,
36856 .submit_bio_hook = btree_submit_bio_hook,
36857 diff -urNp linux-2.6.32.42/fs/btrfs/extent_io.h linux-2.6.32.42/fs/btrfs/extent_io.h
36858 --- linux-2.6.32.42/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
36859 +++ linux-2.6.32.42/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
36860 @@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
36861 struct bio *bio, int mirror_num,
36862 unsigned long bio_flags);
36863 struct extent_io_ops {
36864 - int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
36865 + int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
36866 u64 start, u64 end, int *page_started,
36867 unsigned long *nr_written);
36868 - int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
36869 - int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
36870 + int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
36871 + int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
36872 extent_submit_bio_hook_t *submit_bio_hook;
36873 - int (*merge_bio_hook)(struct page *page, unsigned long offset,
36874 + int (* const merge_bio_hook)(struct page *page, unsigned long offset,
36875 size_t size, struct bio *bio,
36876 unsigned long bio_flags);
36877 - int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
36878 - int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
36879 + int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
36880 + int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
36881 u64 start, u64 end,
36882 struct extent_state *state);
36883 - int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
36884 + int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
36885 u64 start, u64 end,
36886 struct extent_state *state);
36887 - int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
36888 + int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
36889 struct extent_state *state);
36890 - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
36891 + int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
36892 struct extent_state *state, int uptodate);
36893 - int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
36894 + int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
36895 unsigned long old, unsigned long bits);
36896 - int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
36897 + int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
36898 unsigned long bits);
36899 - int (*merge_extent_hook)(struct inode *inode,
36900 + int (* const merge_extent_hook)(struct inode *inode,
36901 struct extent_state *new,
36902 struct extent_state *other);
36903 - int (*split_extent_hook)(struct inode *inode,
36904 + int (* const split_extent_hook)(struct inode *inode,
36905 struct extent_state *orig, u64 split);
36906 - int (*write_cache_pages_lock_hook)(struct page *page);
36907 + int (* const write_cache_pages_lock_hook)(struct page *page);
36908 };
36909
36910 struct extent_io_tree {
36911 @@ -88,7 +88,7 @@ struct extent_io_tree {
36912 u64 dirty_bytes;
36913 spinlock_t lock;
36914 spinlock_t buffer_lock;
36915 - struct extent_io_ops *ops;
36916 + const struct extent_io_ops *ops;
36917 };
36918
36919 struct extent_state {
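
The btrfs hunks above (disk-io.c and extent_io.h here, inode.c further down) constify the extent_io_ops tables and the ops pointer that refers to them, so the function-pointer tables can live in read-only memory and cannot be retargeted after initialisation. A self-contained C sketch of the same idiom; the struct and function names are made up for the example and are not btrfs symbols:

#include <stdio.h>

/* A table of operations, analogous to the extent_io_ops made const above. */
struct demo_ops {
	int (*const readpage)(int page);   /* const members: set once at init, */
	int (*const writepage)(int page);  /* never reassigned afterwards      */
};

static int demo_readpage(int page)  { return printf("read %d\n", page); }
static int demo_writepage(int page) { return printf("write %d\n", page); }

/* 'static const' lets the toolchain place the whole table in .rodata. */
static const struct demo_ops demo_extent_ops = {
	.readpage  = demo_readpage,
	.writepage = demo_writepage,
};

int main(void)
{
	const struct demo_ops *ops = &demo_extent_ops;  /* like the constified 'ops' field */

	ops->readpage(1);
	ops->writepage(2);
	/* ops->readpage = NULL;  would now be rejected at compile time */
	return 0;
}

Because both the members and the object are const, any later assignment through the table is a compile-time error, which appears to be the hardening intent behind the change.
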
36920 diff -urNp linux-2.6.32.42/fs/btrfs/extent-tree.c linux-2.6.32.42/fs/btrfs/extent-tree.c
36921 --- linux-2.6.32.42/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
36922 +++ linux-2.6.32.42/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
36923 @@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
36924 u64 group_start = group->key.objectid;
36925 new_extents = kmalloc(sizeof(*new_extents),
36926 GFP_NOFS);
36927 + if (!new_extents) {
36928 + ret = -ENOMEM;
36929 + goto out;
36930 + }
36931 nr_extents = 1;
36932 ret = get_new_locations(reloc_inode,
36933 extent_key,
36934 diff -urNp linux-2.6.32.42/fs/btrfs/free-space-cache.c linux-2.6.32.42/fs/btrfs/free-space-cache.c
36935 --- linux-2.6.32.42/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
36936 +++ linux-2.6.32.42/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
36937 @@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
36938
36939 while(1) {
36940 if (entry->bytes < bytes || entry->offset < min_start) {
36941 - struct rb_node *node;
36942 -
36943 node = rb_next(&entry->offset_index);
36944 if (!node)
36945 break;
36946 @@ -1226,7 +1224,7 @@ again:
36947 */
36948 while (entry->bitmap || found_bitmap ||
36949 (!entry->bitmap && entry->bytes < min_bytes)) {
36950 - struct rb_node *node = rb_next(&entry->offset_index);
36951 + node = rb_next(&entry->offset_index);
36952
36953 if (entry->bitmap && entry->bytes > bytes + empty_size) {
36954 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
36955 diff -urNp linux-2.6.32.42/fs/btrfs/inode.c linux-2.6.32.42/fs/btrfs/inode.c
36956 --- linux-2.6.32.42/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
36957 +++ linux-2.6.32.42/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
36958 @@ -63,7 +63,7 @@ static const struct inode_operations btr
36959 static const struct address_space_operations btrfs_aops;
36960 static const struct address_space_operations btrfs_symlink_aops;
36961 static const struct file_operations btrfs_dir_file_operations;
36962 -static struct extent_io_ops btrfs_extent_io_ops;
36963 +static const struct extent_io_ops btrfs_extent_io_ops;
36964
36965 static struct kmem_cache *btrfs_inode_cachep;
36966 struct kmem_cache *btrfs_trans_handle_cachep;
36967 @@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
36968 1, 0, NULL, GFP_NOFS);
36969 while (start < end) {
36970 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
36971 + BUG_ON(!async_cow);
36972 async_cow->inode = inode;
36973 async_cow->root = root;
36974 async_cow->locked_page = locked_page;
36975 @@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
36976 inline_size = btrfs_file_extent_inline_item_len(leaf,
36977 btrfs_item_nr(leaf, path->slots[0]));
36978 tmp = kmalloc(inline_size, GFP_NOFS);
36979 + if (!tmp)
36980 + return -ENOMEM;
36981 ptr = btrfs_file_extent_inline_start(item);
36982
36983 read_extent_buffer(leaf, tmp, ptr, inline_size);
36984 @@ -5410,7 +5413,7 @@ fail:
36985 return -ENOMEM;
36986 }
36987
36988 -static int btrfs_getattr(struct vfsmount *mnt,
36989 +int btrfs_getattr(struct vfsmount *mnt,
36990 struct dentry *dentry, struct kstat *stat)
36991 {
36992 struct inode *inode = dentry->d_inode;
36993 @@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
36994 return 0;
36995 }
36996
36997 +EXPORT_SYMBOL(btrfs_getattr);
36998 +
36999 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
37000 +{
37001 + return BTRFS_I(inode)->root->anon_super.s_dev;
37002 +}
37003 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
37004 +
37005 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
37006 struct inode *new_dir, struct dentry *new_dentry)
37007 {
37008 @@ -5972,7 +5983,7 @@ static const struct file_operations btrf
37009 .fsync = btrfs_sync_file,
37010 };
37011
37012 -static struct extent_io_ops btrfs_extent_io_ops = {
37013 +static const struct extent_io_ops btrfs_extent_io_ops = {
37014 .fill_delalloc = run_delalloc_range,
37015 .submit_bio_hook = btrfs_submit_bio_hook,
37016 .merge_bio_hook = btrfs_merge_bio_hook,
37017 diff -urNp linux-2.6.32.42/fs/btrfs/relocation.c linux-2.6.32.42/fs/btrfs/relocation.c
37018 --- linux-2.6.32.42/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
37019 +++ linux-2.6.32.42/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
37020 @@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
37021 }
37022 spin_unlock(&rc->reloc_root_tree.lock);
37023
37024 - BUG_ON((struct btrfs_root *)node->data != root);
37025 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
37026
37027 if (!del) {
37028 spin_lock(&rc->reloc_root_tree.lock);
37029 diff -urNp linux-2.6.32.42/fs/btrfs/sysfs.c linux-2.6.32.42/fs/btrfs/sysfs.c
37030 --- linux-2.6.32.42/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
37031 +++ linux-2.6.32.42/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
37032 @@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
37033 complete(&root->kobj_unregister);
37034 }
37035
37036 -static struct sysfs_ops btrfs_super_attr_ops = {
37037 +static const struct sysfs_ops btrfs_super_attr_ops = {
37038 .show = btrfs_super_attr_show,
37039 .store = btrfs_super_attr_store,
37040 };
37041
37042 -static struct sysfs_ops btrfs_root_attr_ops = {
37043 +static const struct sysfs_ops btrfs_root_attr_ops = {
37044 .show = btrfs_root_attr_show,
37045 .store = btrfs_root_attr_store,
37046 };
37047 diff -urNp linux-2.6.32.42/fs/buffer.c linux-2.6.32.42/fs/buffer.c
37048 --- linux-2.6.32.42/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
37049 +++ linux-2.6.32.42/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
37050 @@ -25,6 +25,7 @@
37051 #include <linux/percpu.h>
37052 #include <linux/slab.h>
37053 #include <linux/capability.h>
37054 +#include <linux/security.h>
37055 #include <linux/blkdev.h>
37056 #include <linux/file.h>
37057 #include <linux/quotaops.h>
37058 diff -urNp linux-2.6.32.42/fs/cachefiles/bind.c linux-2.6.32.42/fs/cachefiles/bind.c
37059 --- linux-2.6.32.42/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
37060 +++ linux-2.6.32.42/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
37061 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
37062 args);
37063
37064 /* start by checking things over */
37065 - ASSERT(cache->fstop_percent >= 0 &&
37066 - cache->fstop_percent < cache->fcull_percent &&
37067 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
37068 cache->fcull_percent < cache->frun_percent &&
37069 cache->frun_percent < 100);
37070
37071 - ASSERT(cache->bstop_percent >= 0 &&
37072 - cache->bstop_percent < cache->bcull_percent &&
37073 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
37074 cache->bcull_percent < cache->brun_percent &&
37075 cache->brun_percent < 100);
37076
37077 diff -urNp linux-2.6.32.42/fs/cachefiles/daemon.c linux-2.6.32.42/fs/cachefiles/daemon.c
37078 --- linux-2.6.32.42/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
37079 +++ linux-2.6.32.42/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
37080 @@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
37081 if (test_bit(CACHEFILES_DEAD, &cache->flags))
37082 return -EIO;
37083
37084 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
37085 + if (datalen > PAGE_SIZE - 1)
37086 return -EOPNOTSUPP;
37087
37088 /* drag the command string into the kernel so we can parse it */
37089 @@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
37090 if (args[0] != '%' || args[1] != '\0')
37091 return -EINVAL;
37092
37093 - if (fstop < 0 || fstop >= cache->fcull_percent)
37094 + if (fstop >= cache->fcull_percent)
37095 return cachefiles_daemon_range_error(cache, args);
37096
37097 cache->fstop_percent = fstop;
37098 @@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
37099 if (args[0] != '%' || args[1] != '\0')
37100 return -EINVAL;
37101
37102 - if (bstop < 0 || bstop >= cache->bcull_percent)
37103 + if (bstop >= cache->bcull_percent)
37104 return cachefiles_daemon_range_error(cache, args);
37105
37106 cache->bstop_percent = bstop;
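
The cachefiles hunks above drop the "< 0" halves of several range checks (datalen, fstop, bstop). The values involved are unsigned, as the fs/cachefiles/internal.h hunk that follows shows for the percentage fields, so those comparisons can never be true and serve only to trigger compiler warnings. A small, runnable sketch of why the dropped tests were dead code (illustrative only; build with -Wall -Wextra to see the always-false warning):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t datalen = (size_t)-1;   /* a huge value, not a negative one */

	if (datalen < 0)               /* always false for an unsigned type */
		puts("never printed");

	if (datalen > 4096 - 1)        /* the upper-bound check still matters */
		puts("datalen too large");
	return 0;
}
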
37107 diff -urNp linux-2.6.32.42/fs/cachefiles/internal.h linux-2.6.32.42/fs/cachefiles/internal.h
37108 --- linux-2.6.32.42/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
37109 +++ linux-2.6.32.42/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
37110 @@ -56,7 +56,7 @@ struct cachefiles_cache {
37111 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
37112 struct rb_root active_nodes; /* active nodes (can't be culled) */
37113 rwlock_t active_lock; /* lock for active_nodes */
37114 - atomic_t gravecounter; /* graveyard uniquifier */
37115 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
37116 unsigned frun_percent; /* when to stop culling (% files) */
37117 unsigned fcull_percent; /* when to start culling (% files) */
37118 unsigned fstop_percent; /* when to stop allocating (% files) */
37119 @@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
37120 * proc.c
37121 */
37122 #ifdef CONFIG_CACHEFILES_HISTOGRAM
37123 -extern atomic_t cachefiles_lookup_histogram[HZ];
37124 -extern atomic_t cachefiles_mkdir_histogram[HZ];
37125 -extern atomic_t cachefiles_create_histogram[HZ];
37126 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37127 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37128 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
37129
37130 extern int __init cachefiles_proc_init(void);
37131 extern void cachefiles_proc_cleanup(void);
37132 static inline
37133 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
37134 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
37135 {
37136 unsigned long jif = jiffies - start_jif;
37137 if (jif >= HZ)
37138 jif = HZ - 1;
37139 - atomic_inc(&histogram[jif]);
37140 + atomic_inc_unchecked(&histogram[jif]);
37141 }
37142
37143 #else
37144 diff -urNp linux-2.6.32.42/fs/cachefiles/namei.c linux-2.6.32.42/fs/cachefiles/namei.c
37145 --- linux-2.6.32.42/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
37146 +++ linux-2.6.32.42/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
37147 @@ -250,7 +250,7 @@ try_again:
37148 /* first step is to make up a grave dentry in the graveyard */
37149 sprintf(nbuffer, "%08x%08x",
37150 (uint32_t) get_seconds(),
37151 - (uint32_t) atomic_inc_return(&cache->gravecounter));
37152 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
37153
37154 /* do the multiway lock magic */
37155 trap = lock_rename(cache->graveyard, dir);
37156 diff -urNp linux-2.6.32.42/fs/cachefiles/proc.c linux-2.6.32.42/fs/cachefiles/proc.c
37157 --- linux-2.6.32.42/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
37158 +++ linux-2.6.32.42/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
37159 @@ -14,9 +14,9 @@
37160 #include <linux/seq_file.h>
37161 #include "internal.h"
37162
37163 -atomic_t cachefiles_lookup_histogram[HZ];
37164 -atomic_t cachefiles_mkdir_histogram[HZ];
37165 -atomic_t cachefiles_create_histogram[HZ];
37166 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37167 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37168 +atomic_unchecked_t cachefiles_create_histogram[HZ];
37169
37170 /*
37171 * display the latency histogram
37172 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
37173 return 0;
37174 default:
37175 index = (unsigned long) v - 3;
37176 - x = atomic_read(&cachefiles_lookup_histogram[index]);
37177 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
37178 - z = atomic_read(&cachefiles_create_histogram[index]);
37179 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
37180 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
37181 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
37182 if (x == 0 && y == 0 && z == 0)
37183 return 0;
37184
37185 diff -urNp linux-2.6.32.42/fs/cachefiles/rdwr.c linux-2.6.32.42/fs/cachefiles/rdwr.c
37186 --- linux-2.6.32.42/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
37187 +++ linux-2.6.32.42/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
37188 @@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
37189 old_fs = get_fs();
37190 set_fs(KERNEL_DS);
37191 ret = file->f_op->write(
37192 - file, (const void __user *) data, len, &pos);
37193 + file, (__force const void __user *) data, len, &pos);
37194 set_fs(old_fs);
37195 kunmap(page);
37196 if (ret != len)
37197 diff -urNp linux-2.6.32.42/fs/cifs/cifs_debug.c linux-2.6.32.42/fs/cifs/cifs_debug.c
37198 --- linux-2.6.32.42/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
37199 +++ linux-2.6.32.42/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
37200 @@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
37201 tcon = list_entry(tmp3,
37202 struct cifsTconInfo,
37203 tcon_list);
37204 - atomic_set(&tcon->num_smbs_sent, 0);
37205 - atomic_set(&tcon->num_writes, 0);
37206 - atomic_set(&tcon->num_reads, 0);
37207 - atomic_set(&tcon->num_oplock_brks, 0);
37208 - atomic_set(&tcon->num_opens, 0);
37209 - atomic_set(&tcon->num_posixopens, 0);
37210 - atomic_set(&tcon->num_posixmkdirs, 0);
37211 - atomic_set(&tcon->num_closes, 0);
37212 - atomic_set(&tcon->num_deletes, 0);
37213 - atomic_set(&tcon->num_mkdirs, 0);
37214 - atomic_set(&tcon->num_rmdirs, 0);
37215 - atomic_set(&tcon->num_renames, 0);
37216 - atomic_set(&tcon->num_t2renames, 0);
37217 - atomic_set(&tcon->num_ffirst, 0);
37218 - atomic_set(&tcon->num_fnext, 0);
37219 - atomic_set(&tcon->num_fclose, 0);
37220 - atomic_set(&tcon->num_hardlinks, 0);
37221 - atomic_set(&tcon->num_symlinks, 0);
37222 - atomic_set(&tcon->num_locks, 0);
37223 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
37224 + atomic_set_unchecked(&tcon->num_writes, 0);
37225 + atomic_set_unchecked(&tcon->num_reads, 0);
37226 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
37227 + atomic_set_unchecked(&tcon->num_opens, 0);
37228 + atomic_set_unchecked(&tcon->num_posixopens, 0);
37229 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
37230 + atomic_set_unchecked(&tcon->num_closes, 0);
37231 + atomic_set_unchecked(&tcon->num_deletes, 0);
37232 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
37233 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
37234 + atomic_set_unchecked(&tcon->num_renames, 0);
37235 + atomic_set_unchecked(&tcon->num_t2renames, 0);
37236 + atomic_set_unchecked(&tcon->num_ffirst, 0);
37237 + atomic_set_unchecked(&tcon->num_fnext, 0);
37238 + atomic_set_unchecked(&tcon->num_fclose, 0);
37239 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
37240 + atomic_set_unchecked(&tcon->num_symlinks, 0);
37241 + atomic_set_unchecked(&tcon->num_locks, 0);
37242 }
37243 }
37244 }
37245 @@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
37246 if (tcon->need_reconnect)
37247 seq_puts(m, "\tDISCONNECTED ");
37248 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
37249 - atomic_read(&tcon->num_smbs_sent),
37250 - atomic_read(&tcon->num_oplock_brks));
37251 + atomic_read_unchecked(&tcon->num_smbs_sent),
37252 + atomic_read_unchecked(&tcon->num_oplock_brks));
37253 seq_printf(m, "\nReads: %d Bytes: %lld",
37254 - atomic_read(&tcon->num_reads),
37255 + atomic_read_unchecked(&tcon->num_reads),
37256 (long long)(tcon->bytes_read));
37257 seq_printf(m, "\nWrites: %d Bytes: %lld",
37258 - atomic_read(&tcon->num_writes),
37259 + atomic_read_unchecked(&tcon->num_writes),
37260 (long long)(tcon->bytes_written));
37261 seq_printf(m, "\nFlushes: %d",
37262 - atomic_read(&tcon->num_flushes));
37263 + atomic_read_unchecked(&tcon->num_flushes));
37264 seq_printf(m, "\nLocks: %d HardLinks: %d "
37265 "Symlinks: %d",
37266 - atomic_read(&tcon->num_locks),
37267 - atomic_read(&tcon->num_hardlinks),
37268 - atomic_read(&tcon->num_symlinks));
37269 + atomic_read_unchecked(&tcon->num_locks),
37270 + atomic_read_unchecked(&tcon->num_hardlinks),
37271 + atomic_read_unchecked(&tcon->num_symlinks));
37272 seq_printf(m, "\nOpens: %d Closes: %d "
37273 "Deletes: %d",
37274 - atomic_read(&tcon->num_opens),
37275 - atomic_read(&tcon->num_closes),
37276 - atomic_read(&tcon->num_deletes));
37277 + atomic_read_unchecked(&tcon->num_opens),
37278 + atomic_read_unchecked(&tcon->num_closes),
37279 + atomic_read_unchecked(&tcon->num_deletes));
37280 seq_printf(m, "\nPosix Opens: %d "
37281 "Posix Mkdirs: %d",
37282 - atomic_read(&tcon->num_posixopens),
37283 - atomic_read(&tcon->num_posixmkdirs));
37284 + atomic_read_unchecked(&tcon->num_posixopens),
37285 + atomic_read_unchecked(&tcon->num_posixmkdirs));
37286 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
37287 - atomic_read(&tcon->num_mkdirs),
37288 - atomic_read(&tcon->num_rmdirs));
37289 + atomic_read_unchecked(&tcon->num_mkdirs),
37290 + atomic_read_unchecked(&tcon->num_rmdirs));
37291 seq_printf(m, "\nRenames: %d T2 Renames %d",
37292 - atomic_read(&tcon->num_renames),
37293 - atomic_read(&tcon->num_t2renames));
37294 + atomic_read_unchecked(&tcon->num_renames),
37295 + atomic_read_unchecked(&tcon->num_t2renames));
37296 seq_printf(m, "\nFindFirst: %d FNext %d "
37297 "FClose %d",
37298 - atomic_read(&tcon->num_ffirst),
37299 - atomic_read(&tcon->num_fnext),
37300 - atomic_read(&tcon->num_fclose));
37301 + atomic_read_unchecked(&tcon->num_ffirst),
37302 + atomic_read_unchecked(&tcon->num_fnext),
37303 + atomic_read_unchecked(&tcon->num_fclose));
37304 }
37305 }
37306 }
37307 diff -urNp linux-2.6.32.42/fs/cifs/cifsglob.h linux-2.6.32.42/fs/cifs/cifsglob.h
37308 --- linux-2.6.32.42/fs/cifs/cifsglob.h 2011-03-27 14:31:47.000000000 -0400
37309 +++ linux-2.6.32.42/fs/cifs/cifsglob.h 2011-05-04 17:56:28.000000000 -0400
37310 @@ -252,28 +252,28 @@ struct cifsTconInfo {
37311 __u16 Flags; /* optional support bits */
37312 enum statusEnum tidStatus;
37313 #ifdef CONFIG_CIFS_STATS
37314 - atomic_t num_smbs_sent;
37315 - atomic_t num_writes;
37316 - atomic_t num_reads;
37317 - atomic_t num_flushes;
37318 - atomic_t num_oplock_brks;
37319 - atomic_t num_opens;
37320 - atomic_t num_closes;
37321 - atomic_t num_deletes;
37322 - atomic_t num_mkdirs;
37323 - atomic_t num_posixopens;
37324 - atomic_t num_posixmkdirs;
37325 - atomic_t num_rmdirs;
37326 - atomic_t num_renames;
37327 - atomic_t num_t2renames;
37328 - atomic_t num_ffirst;
37329 - atomic_t num_fnext;
37330 - atomic_t num_fclose;
37331 - atomic_t num_hardlinks;
37332 - atomic_t num_symlinks;
37333 - atomic_t num_locks;
37334 - atomic_t num_acl_get;
37335 - atomic_t num_acl_set;
37336 + atomic_unchecked_t num_smbs_sent;
37337 + atomic_unchecked_t num_writes;
37338 + atomic_unchecked_t num_reads;
37339 + atomic_unchecked_t num_flushes;
37340 + atomic_unchecked_t num_oplock_brks;
37341 + atomic_unchecked_t num_opens;
37342 + atomic_unchecked_t num_closes;
37343 + atomic_unchecked_t num_deletes;
37344 + atomic_unchecked_t num_mkdirs;
37345 + atomic_unchecked_t num_posixopens;
37346 + atomic_unchecked_t num_posixmkdirs;
37347 + atomic_unchecked_t num_rmdirs;
37348 + atomic_unchecked_t num_renames;
37349 + atomic_unchecked_t num_t2renames;
37350 + atomic_unchecked_t num_ffirst;
37351 + atomic_unchecked_t num_fnext;
37352 + atomic_unchecked_t num_fclose;
37353 + atomic_unchecked_t num_hardlinks;
37354 + atomic_unchecked_t num_symlinks;
37355 + atomic_unchecked_t num_locks;
37356 + atomic_unchecked_t num_acl_get;
37357 + atomic_unchecked_t num_acl_set;
37358 #ifdef CONFIG_CIFS_STATS2
37359 unsigned long long time_writes;
37360 unsigned long long time_reads;
37361 @@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
37362 }
37363
37364 #ifdef CONFIG_CIFS_STATS
37365 -#define cifs_stats_inc atomic_inc
37366 +#define cifs_stats_inc atomic_inc_unchecked
37367
37368 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
37369 unsigned int bytes)
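
The cifs hunks above, like the cachefiles and coda ones before them, switch pure statistics counters from atomic_t to atomic_unchecked_t and their accessors to the _unchecked variants. Under the PaX REFCOUNT protection this patch adds elsewhere, ordinary atomic_t operations are overflow-checked as if they were reference counts; counters that are expected to grow without bound are moved to the unchecked type so a wraparound is not treated as an exploit attempt. A userspace sketch of the distinction, using GCC atomic builtins; this is not kernel code and the function names are invented:

#include <limits.h>
#include <stdio.h>

/* A "checked" increment that refuses to wrap past its maximum, next to a
 * plain wrapping increment for a statistic, mirroring the split above. */
static int checked_inc(int *v)
{
	int old = __atomic_load_n(v, __ATOMIC_RELAXED);

	do {
		if (old == INT_MAX)
			return -1;     /* would overflow: refuse, like the PaX check */
	} while (!__atomic_compare_exchange_n(v, &old, old + 1, 0,
					      __ATOMIC_RELAXED, __ATOMIC_RELAXED));
	return 0;
}

static void unchecked_inc(unsigned int *v)
{
	__atomic_fetch_add(v, 1, __ATOMIC_RELAXED);   /* plain wrapping statistic */
}

int main(void)
{
	int refcount = INT_MAX;
	unsigned int num_reads = UINT_MAX;

	if (checked_inc(&refcount) < 0)
		puts("checked counter: overflow refused");

	unchecked_inc(&num_reads);                    /* wraps to 0, fine for a stat */
	printf("unchecked counter after wrap: %u\n", num_reads);
	return 0;
}

The ext4 s_bal_* and s_mb_* counters later in this patch get the same treatment for the same reason.
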
37370 diff -urNp linux-2.6.32.42/fs/cifs/link.c linux-2.6.32.42/fs/cifs/link.c
37371 --- linux-2.6.32.42/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
37372 +++ linux-2.6.32.42/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
37373 @@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
37374
37375 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
37376 {
37377 - char *p = nd_get_link(nd);
37378 + const char *p = nd_get_link(nd);
37379 if (!IS_ERR(p))
37380 kfree(p);
37381 }
37382 diff -urNp linux-2.6.32.42/fs/coda/cache.c linux-2.6.32.42/fs/coda/cache.c
37383 --- linux-2.6.32.42/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
37384 +++ linux-2.6.32.42/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
37385 @@ -24,14 +24,14 @@
37386 #include <linux/coda_fs_i.h>
37387 #include <linux/coda_cache.h>
37388
37389 -static atomic_t permission_epoch = ATOMIC_INIT(0);
37390 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
37391
37392 /* replace or extend an acl cache hit */
37393 void coda_cache_enter(struct inode *inode, int mask)
37394 {
37395 struct coda_inode_info *cii = ITOC(inode);
37396
37397 - cii->c_cached_epoch = atomic_read(&permission_epoch);
37398 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
37399 if (cii->c_uid != current_fsuid()) {
37400 cii->c_uid = current_fsuid();
37401 cii->c_cached_perm = mask;
37402 @@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
37403 void coda_cache_clear_inode(struct inode *inode)
37404 {
37405 struct coda_inode_info *cii = ITOC(inode);
37406 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
37407 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
37408 }
37409
37410 /* remove all acl caches */
37411 void coda_cache_clear_all(struct super_block *sb)
37412 {
37413 - atomic_inc(&permission_epoch);
37414 + atomic_inc_unchecked(&permission_epoch);
37415 }
37416
37417
37418 @@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
37419
37420 hit = (mask & cii->c_cached_perm) == mask &&
37421 cii->c_uid == current_fsuid() &&
37422 - cii->c_cached_epoch == atomic_read(&permission_epoch);
37423 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
37424
37425 return hit;
37426 }
37427 diff -urNp linux-2.6.32.42/fs/compat_binfmt_elf.c linux-2.6.32.42/fs/compat_binfmt_elf.c
37428 --- linux-2.6.32.42/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
37429 +++ linux-2.6.32.42/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
37430 @@ -29,10 +29,12 @@
37431 #undef elfhdr
37432 #undef elf_phdr
37433 #undef elf_note
37434 +#undef elf_dyn
37435 #undef elf_addr_t
37436 #define elfhdr elf32_hdr
37437 #define elf_phdr elf32_phdr
37438 #define elf_note elf32_note
37439 +#define elf_dyn Elf32_Dyn
37440 #define elf_addr_t Elf32_Addr
37441
37442 /*
37443 diff -urNp linux-2.6.32.42/fs/compat.c linux-2.6.32.42/fs/compat.c
37444 --- linux-2.6.32.42/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
37445 +++ linux-2.6.32.42/fs/compat.c 2011-05-16 21:46:57.000000000 -0400
37446 @@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
37447
37448 struct compat_readdir_callback {
37449 struct compat_old_linux_dirent __user *dirent;
37450 + struct file * file;
37451 int result;
37452 };
37453
37454 @@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
37455 buf->result = -EOVERFLOW;
37456 return -EOVERFLOW;
37457 }
37458 +
37459 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37460 + return 0;
37461 +
37462 buf->result++;
37463 dirent = buf->dirent;
37464 if (!access_ok(VERIFY_WRITE, dirent,
37465 @@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
37466
37467 buf.result = 0;
37468 buf.dirent = dirent;
37469 + buf.file = file;
37470
37471 error = vfs_readdir(file, compat_fillonedir, &buf);
37472 if (buf.result)
37473 @@ -899,6 +905,7 @@ struct compat_linux_dirent {
37474 struct compat_getdents_callback {
37475 struct compat_linux_dirent __user *current_dir;
37476 struct compat_linux_dirent __user *previous;
37477 + struct file * file;
37478 int count;
37479 int error;
37480 };
37481 @@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
37482 buf->error = -EOVERFLOW;
37483 return -EOVERFLOW;
37484 }
37485 +
37486 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37487 + return 0;
37488 +
37489 dirent = buf->previous;
37490 if (dirent) {
37491 if (__put_user(offset, &dirent->d_off))
37492 @@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
37493 buf.previous = NULL;
37494 buf.count = count;
37495 buf.error = 0;
37496 + buf.file = file;
37497
37498 error = vfs_readdir(file, compat_filldir, &buf);
37499 if (error >= 0)
37500 @@ -987,6 +999,7 @@ out:
37501 struct compat_getdents_callback64 {
37502 struct linux_dirent64 __user *current_dir;
37503 struct linux_dirent64 __user *previous;
37504 + struct file * file;
37505 int count;
37506 int error;
37507 };
37508 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
37509 buf->error = -EINVAL; /* only used if we fail.. */
37510 if (reclen > buf->count)
37511 return -EINVAL;
37512 +
37513 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
37514 + return 0;
37515 +
37516 dirent = buf->previous;
37517
37518 if (dirent) {
37519 @@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
37520 buf.previous = NULL;
37521 buf.count = count;
37522 buf.error = 0;
37523 + buf.file = file;
37524
37525 error = vfs_readdir(file, compat_filldir64, &buf);
37526 if (error >= 0)
37527 @@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
37528 * verify all the pointers
37529 */
37530 ret = -EINVAL;
37531 - if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
37532 + if (nr_segs > UIO_MAXIOV)
37533 goto out;
37534 if (!file->f_op)
37535 goto out;
37536 @@ -1463,6 +1481,11 @@ int compat_do_execve(char * filename,
37537 compat_uptr_t __user *envp,
37538 struct pt_regs * regs)
37539 {
37540 +#ifdef CONFIG_GRKERNSEC
37541 + struct file *old_exec_file;
37542 + struct acl_subject_label *old_acl;
37543 + struct rlimit old_rlim[RLIM_NLIMITS];
37544 +#endif
37545 struct linux_binprm *bprm;
37546 struct file *file;
37547 struct files_struct *displaced;
37548 @@ -1499,6 +1522,19 @@ int compat_do_execve(char * filename,
37549 bprm->filename = filename;
37550 bprm->interp = filename;
37551
37552 + if (gr_process_user_ban()) {
37553 + retval = -EPERM;
37554 + goto out_file;
37555 + }
37556 +
37557 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
37558 + retval = -EAGAIN;
37559 + if (gr_handle_nproc())
37560 + goto out_file;
37561 + retval = -EACCES;
37562 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
37563 + goto out_file;
37564 +
37565 retval = bprm_mm_init(bprm);
37566 if (retval)
37567 goto out_file;
37568 @@ -1528,9 +1564,40 @@ int compat_do_execve(char * filename,
37569 if (retval < 0)
37570 goto out;
37571
37572 + if (!gr_tpe_allow(file)) {
37573 + retval = -EACCES;
37574 + goto out;
37575 + }
37576 +
37577 + if (gr_check_crash_exec(file)) {
37578 + retval = -EACCES;
37579 + goto out;
37580 + }
37581 +
37582 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
37583 +
37584 + gr_handle_exec_args_compat(bprm, argv);
37585 +
37586 +#ifdef CONFIG_GRKERNSEC
37587 + old_acl = current->acl;
37588 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
37589 + old_exec_file = current->exec_file;
37590 + get_file(file);
37591 + current->exec_file = file;
37592 +#endif
37593 +
37594 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
37595 + bprm->unsafe & LSM_UNSAFE_SHARE);
37596 + if (retval < 0)
37597 + goto out_fail;
37598 +
37599 retval = search_binary_handler(bprm, regs);
37600 if (retval < 0)
37601 - goto out;
37602 + goto out_fail;
37603 +#ifdef CONFIG_GRKERNSEC
37604 + if (old_exec_file)
37605 + fput(old_exec_file);
37606 +#endif
37607
37608 /* execve succeeded */
37609 current->fs->in_exec = 0;
37610 @@ -1541,6 +1608,14 @@ int compat_do_execve(char * filename,
37611 put_files_struct(displaced);
37612 return retval;
37613
37614 +out_fail:
37615 +#ifdef CONFIG_GRKERNSEC
37616 + current->acl = old_acl;
37617 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
37618 + fput(current->exec_file);
37619 + current->exec_file = old_exec_file;
37620 +#endif
37621 +
37622 out:
37623 if (bprm->mm) {
37624 acct_arg_size(bprm, 0);
37625 @@ -1711,6 +1786,8 @@ int compat_core_sys_select(int n, compat
37626 struct fdtable *fdt;
37627 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
37628
37629 + pax_track_stack();
37630 +
37631 if (n < 0)
37632 goto out_nofds;
37633
37634 diff -urNp linux-2.6.32.42/fs/compat_ioctl.c linux-2.6.32.42/fs/compat_ioctl.c
37635 --- linux-2.6.32.42/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
37636 +++ linux-2.6.32.42/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
37637 @@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
37638 up = (struct compat_video_spu_palette __user *) arg;
37639 err = get_user(palp, &up->palette);
37640 err |= get_user(length, &up->length);
37641 + if (err)
37642 + return -EFAULT;
37643
37644 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
37645 err = put_user(compat_ptr(palp), &up_native->palette);
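
The compat_ioctl.c hunk above makes do_video_set_spu_palette() return -EFAULT when either get_user() call fails, instead of carrying possibly uninitialised values forward. A tiny sketch of the same pattern; demo_get_user() is an invented stand-in for get_user(), not a real kernel API:

#include <stdio.h>
#include <string.h>

#define DEMO_EFAULT 14

static int demo_get_user(int *dst, const int *src)
{
	if (!src)
		return -DEMO_EFAULT;       /* pretend the user pointer was bad */
	memcpy(dst, src, sizeof(*dst));
	return 0;
}

static int set_palette(const int *user_palp, const int *user_length)
{
	int palp, length, err;

	err  = demo_get_user(&palp, user_palp);
	err |= demo_get_user(&length, user_length);
	if (err)
		return -DEMO_EFAULT;       /* the early bail-out the hunk above inserts */

	printf("palette=%d length=%d\n", palp, length);
	return 0;
}

int main(void)
{
	int p = 3, l = 7;

	printf("ok:  %d\n", set_palette(&p, &l));
	printf("bad: %d\n", set_palette(NULL, &l));
	return 0;
}
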
37646 diff -urNp linux-2.6.32.42/fs/configfs/dir.c linux-2.6.32.42/fs/configfs/dir.c
37647 --- linux-2.6.32.42/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
37648 +++ linux-2.6.32.42/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
37649 @@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
37650 }
37651 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
37652 struct configfs_dirent *next;
37653 - const char * name;
37654 + const unsigned char * name;
37655 + char d_name[sizeof(next->s_dentry->d_iname)];
37656 int len;
37657
37658 next = list_entry(p, struct configfs_dirent,
37659 @@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
37660 continue;
37661
37662 name = configfs_get_name(next);
37663 - len = strlen(name);
37664 + if (next->s_dentry && name == next->s_dentry->d_iname) {
37665 + len = next->s_dentry->d_name.len;
37666 + memcpy(d_name, name, len);
37667 + name = d_name;
37668 + } else
37669 + len = strlen(name);
37670 if (next->s_dentry)
37671 ino = next->s_dentry->d_inode->i_ino;
37672 else
37673 diff -urNp linux-2.6.32.42/fs/dcache.c linux-2.6.32.42/fs/dcache.c
37674 --- linux-2.6.32.42/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
37675 +++ linux-2.6.32.42/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
37676 @@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
37677
37678 static struct kmem_cache *dentry_cache __read_mostly;
37679
37680 -#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
37681 -
37682 /*
37683 * This is the single most critical data structure when it comes
37684 * to the dcache: the hashtable for lookups. Somebody should try
37685 @@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
37686 mempages -= reserve;
37687
37688 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
37689 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
37690 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
37691
37692 dcache_init();
37693 inode_init();
37694 diff -urNp linux-2.6.32.42/fs/dlm/lockspace.c linux-2.6.32.42/fs/dlm/lockspace.c
37695 --- linux-2.6.32.42/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
37696 +++ linux-2.6.32.42/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
37697 @@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
37698 kfree(ls);
37699 }
37700
37701 -static struct sysfs_ops dlm_attr_ops = {
37702 +static const struct sysfs_ops dlm_attr_ops = {
37703 .show = dlm_attr_show,
37704 .store = dlm_attr_store,
37705 };
37706 diff -urNp linux-2.6.32.42/fs/ecryptfs/inode.c linux-2.6.32.42/fs/ecryptfs/inode.c
37707 --- linux-2.6.32.42/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37708 +++ linux-2.6.32.42/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
37709 @@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
37710 old_fs = get_fs();
37711 set_fs(get_ds());
37712 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
37713 - (char __user *)lower_buf,
37714 + (__force char __user *)lower_buf,
37715 lower_bufsiz);
37716 set_fs(old_fs);
37717 if (rc < 0)
37718 @@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
37719 }
37720 old_fs = get_fs();
37721 set_fs(get_ds());
37722 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
37723 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
37724 set_fs(old_fs);
37725 if (rc < 0)
37726 goto out_free;
37727 diff -urNp linux-2.6.32.42/fs/exec.c linux-2.6.32.42/fs/exec.c
37728 --- linux-2.6.32.42/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
37729 +++ linux-2.6.32.42/fs/exec.c 2011-06-25 12:56:37.000000000 -0400
37730 @@ -56,12 +56,24 @@
37731 #include <linux/fsnotify.h>
37732 #include <linux/fs_struct.h>
37733 #include <linux/pipe_fs_i.h>
37734 +#include <linux/random.h>
37735 +#include <linux/seq_file.h>
37736 +
37737 +#ifdef CONFIG_PAX_REFCOUNT
37738 +#include <linux/kallsyms.h>
37739 +#include <linux/kdebug.h>
37740 +#endif
37741
37742 #include <asm/uaccess.h>
37743 #include <asm/mmu_context.h>
37744 #include <asm/tlb.h>
37745 #include "internal.h"
37746
37747 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
37748 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
37749 +EXPORT_SYMBOL(pax_set_initial_flags_func);
37750 +#endif
37751 +
37752 int core_uses_pid;
37753 char core_pattern[CORENAME_MAX_SIZE] = "core";
37754 unsigned int core_pipe_limit;
37755 @@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
37756 goto out;
37757
37758 file = do_filp_open(AT_FDCWD, tmp,
37759 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
37760 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
37761 MAY_READ | MAY_EXEC | MAY_OPEN);
37762 putname(tmp);
37763 error = PTR_ERR(file);
37764 @@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
37765 int write)
37766 {
37767 struct page *page;
37768 - int ret;
37769
37770 -#ifdef CONFIG_STACK_GROWSUP
37771 - if (write) {
37772 - ret = expand_stack_downwards(bprm->vma, pos);
37773 - if (ret < 0)
37774 - return NULL;
37775 - }
37776 -#endif
37777 - ret = get_user_pages(current, bprm->mm, pos,
37778 - 1, write, 1, &page, NULL);
37779 - if (ret <= 0)
37780 + if (0 > expand_stack_downwards(bprm->vma, pos))
37781 + return NULL;
37782 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
37783 return NULL;
37784
37785 if (write) {
37786 @@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
37787 vma->vm_end = STACK_TOP_MAX;
37788 vma->vm_start = vma->vm_end - PAGE_SIZE;
37789 vma->vm_flags = VM_STACK_FLAGS;
37790 +
37791 +#ifdef CONFIG_PAX_SEGMEXEC
37792 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
37793 +#endif
37794 +
37795 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
37796
37797 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
37798 @@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
37799 mm->stack_vm = mm->total_vm = 1;
37800 up_write(&mm->mmap_sem);
37801 bprm->p = vma->vm_end - sizeof(void *);
37802 +
37803 +#ifdef CONFIG_PAX_RANDUSTACK
37804 + if (randomize_va_space)
37805 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
37806 +#endif
37807 +
37808 return 0;
37809 err:
37810 up_write(&mm->mmap_sem);
37811 @@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
37812 int r;
37813 mm_segment_t oldfs = get_fs();
37814 set_fs(KERNEL_DS);
37815 - r = copy_strings(argc, (char __user * __user *)argv, bprm);
37816 + r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
37817 set_fs(oldfs);
37818 return r;
37819 }
37820 @@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
37821 unsigned long new_end = old_end - shift;
37822 struct mmu_gather *tlb;
37823
37824 - BUG_ON(new_start > new_end);
37825 + if (new_start >= new_end || new_start < mmap_min_addr)
37826 + return -ENOMEM;
37827
37828 /*
37829 * ensure there are no vmas between where we want to go
37830 @@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
37831 if (vma != find_vma(mm, new_start))
37832 return -EFAULT;
37833
37834 +#ifdef CONFIG_PAX_SEGMEXEC
37835 + BUG_ON(pax_find_mirror_vma(vma));
37836 +#endif
37837 +
37838 /*
37839 * cover the whole range: [new_start, old_end)
37840 */
37841 @@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
37842 stack_top = arch_align_stack(stack_top);
37843 stack_top = PAGE_ALIGN(stack_top);
37844
37845 - if (unlikely(stack_top < mmap_min_addr) ||
37846 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
37847 - return -ENOMEM;
37848 -
37849 stack_shift = vma->vm_end - stack_top;
37850
37851 bprm->p -= stack_shift;
37852 @@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
37853 bprm->exec -= stack_shift;
37854
37855 down_write(&mm->mmap_sem);
37856 +
37857 + /* Move stack pages down in memory. */
37858 + if (stack_shift) {
37859 + ret = shift_arg_pages(vma, stack_shift);
37860 + if (ret)
37861 + goto out_unlock;
37862 + }
37863 +
37864 vm_flags = VM_STACK_FLAGS;
37865
37866 /*
37867 @@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
37868 vm_flags &= ~VM_EXEC;
37869 vm_flags |= mm->def_flags;
37870
37871 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37872 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37873 + vm_flags &= ~VM_EXEC;
37874 +
37875 +#ifdef CONFIG_PAX_MPROTECT
37876 + if (mm->pax_flags & MF_PAX_MPROTECT)
37877 + vm_flags &= ~VM_MAYEXEC;
37878 +#endif
37879 +
37880 + }
37881 +#endif
37882 +
37883 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
37884 vm_flags);
37885 if (ret)
37886 goto out_unlock;
37887 BUG_ON(prev != vma);
37888
37889 - /* Move stack pages down in memory. */
37890 - if (stack_shift) {
37891 - ret = shift_arg_pages(vma, stack_shift);
37892 - if (ret)
37893 - goto out_unlock;
37894 - }
37895 -
37896 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
37897 stack_size = vma->vm_end - vma->vm_start;
37898 /*
37899 @@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
37900 int err;
37901
37902 file = do_filp_open(AT_FDCWD, name,
37903 - O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
37904 + O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
37905 MAY_EXEC | MAY_OPEN);
37906 if (IS_ERR(file))
37907 goto out;
37908 @@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
37909 old_fs = get_fs();
37910 set_fs(get_ds());
37911 /* The cast to a user pointer is valid due to the set_fs() */
37912 - result = vfs_read(file, (void __user *)addr, count, &pos);
37913 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
37914 set_fs(old_fs);
37915 return result;
37916 }
37917 @@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
37918 }
37919 rcu_read_unlock();
37920
37921 - if (p->fs->users > n_fs) {
37922 + if (atomic_read(&p->fs->users) > n_fs) {
37923 bprm->unsafe |= LSM_UNSAFE_SHARE;
37924 } else {
37925 res = -EAGAIN;
37926 @@ -1347,6 +1376,11 @@ int do_execve(char * filename,
37927 char __user *__user *envp,
37928 struct pt_regs * regs)
37929 {
37930 +#ifdef CONFIG_GRKERNSEC
37931 + struct file *old_exec_file;
37932 + struct acl_subject_label *old_acl;
37933 + struct rlimit old_rlim[RLIM_NLIMITS];
37934 +#endif
37935 struct linux_binprm *bprm;
37936 struct file *file;
37937 struct files_struct *displaced;
37938 @@ -1383,6 +1417,23 @@ int do_execve(char * filename,
37939 bprm->filename = filename;
37940 bprm->interp = filename;
37941
37942 + if (gr_process_user_ban()) {
37943 + retval = -EPERM;
37944 + goto out_file;
37945 + }
37946 +
37947 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
37948 +
37949 + if (gr_handle_nproc()) {
37950 + retval = -EAGAIN;
37951 + goto out_file;
37952 + }
37953 +
37954 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
37955 + retval = -EACCES;
37956 + goto out_file;
37957 + }
37958 +
37959 retval = bprm_mm_init(bprm);
37960 if (retval)
37961 goto out_file;
37962 @@ -1412,10 +1463,41 @@ int do_execve(char * filename,
37963 if (retval < 0)
37964 goto out;
37965
37966 + if (!gr_tpe_allow(file)) {
37967 + retval = -EACCES;
37968 + goto out;
37969 + }
37970 +
37971 + if (gr_check_crash_exec(file)) {
37972 + retval = -EACCES;
37973 + goto out;
37974 + }
37975 +
37976 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
37977 +
37978 + gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
37979 +
37980 +#ifdef CONFIG_GRKERNSEC
37981 + old_acl = current->acl;
37982 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
37983 + old_exec_file = current->exec_file;
37984 + get_file(file);
37985 + current->exec_file = file;
37986 +#endif
37987 +
37988 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
37989 + bprm->unsafe & LSM_UNSAFE_SHARE);
37990 + if (retval < 0)
37991 + goto out_fail;
37992 +
37993 current->flags &= ~PF_KTHREAD;
37994 retval = search_binary_handler(bprm,regs);
37995 if (retval < 0)
37996 - goto out;
37997 + goto out_fail;
37998 +#ifdef CONFIG_GRKERNSEC
37999 + if (old_exec_file)
38000 + fput(old_exec_file);
38001 +#endif
38002
38003 /* execve succeeded */
38004 current->fs->in_exec = 0;
38005 @@ -1426,6 +1508,14 @@ int do_execve(char * filename,
38006 put_files_struct(displaced);
38007 return retval;
38008
38009 +out_fail:
38010 +#ifdef CONFIG_GRKERNSEC
38011 + current->acl = old_acl;
38012 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
38013 + fput(current->exec_file);
38014 + current->exec_file = old_exec_file;
38015 +#endif
38016 +
38017 out:
38018 if (bprm->mm) {
38019 acct_arg_size(bprm, 0);
38020 @@ -1591,6 +1681,220 @@ out:
38021 return ispipe;
38022 }
38023
38024 +int pax_check_flags(unsigned long *flags)
38025 +{
38026 + int retval = 0;
38027 +
38028 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
38029 + if (*flags & MF_PAX_SEGMEXEC)
38030 + {
38031 + *flags &= ~MF_PAX_SEGMEXEC;
38032 + retval = -EINVAL;
38033 + }
38034 +#endif
38035 +
38036 + if ((*flags & MF_PAX_PAGEEXEC)
38037 +
38038 +#ifdef CONFIG_PAX_PAGEEXEC
38039 + && (*flags & MF_PAX_SEGMEXEC)
38040 +#endif
38041 +
38042 + )
38043 + {
38044 + *flags &= ~MF_PAX_PAGEEXEC;
38045 + retval = -EINVAL;
38046 + }
38047 +
38048 + if ((*flags & MF_PAX_MPROTECT)
38049 +
38050 +#ifdef CONFIG_PAX_MPROTECT
38051 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38052 +#endif
38053 +
38054 + )
38055 + {
38056 + *flags &= ~MF_PAX_MPROTECT;
38057 + retval = -EINVAL;
38058 + }
38059 +
38060 + if ((*flags & MF_PAX_EMUTRAMP)
38061 +
38062 +#ifdef CONFIG_PAX_EMUTRAMP
38063 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38064 +#endif
38065 +
38066 + )
38067 + {
38068 + *flags &= ~MF_PAX_EMUTRAMP;
38069 + retval = -EINVAL;
38070 + }
38071 +
38072 + return retval;
38073 +}
38074 +
38075 +EXPORT_SYMBOL(pax_check_flags);
38076 +
38077 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38078 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
38079 +{
38080 + struct task_struct *tsk = current;
38081 + struct mm_struct *mm = current->mm;
38082 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
38083 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
38084 + char *path_exec = NULL;
38085 + char *path_fault = NULL;
38086 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
38087 +
38088 + if (buffer_exec && buffer_fault) {
38089 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
38090 +
38091 + down_read(&mm->mmap_sem);
38092 + vma = mm->mmap;
38093 + while (vma && (!vma_exec || !vma_fault)) {
38094 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
38095 + vma_exec = vma;
38096 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
38097 + vma_fault = vma;
38098 + vma = vma->vm_next;
38099 + }
38100 + if (vma_exec) {
38101 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
38102 + if (IS_ERR(path_exec))
38103 + path_exec = "<path too long>";
38104 + else {
38105 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
38106 + if (path_exec) {
38107 + *path_exec = 0;
38108 + path_exec = buffer_exec;
38109 + } else
38110 + path_exec = "<path too long>";
38111 + }
38112 + }
38113 + if (vma_fault) {
38114 + start = vma_fault->vm_start;
38115 + end = vma_fault->vm_end;
38116 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
38117 + if (vma_fault->vm_file) {
38118 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
38119 + if (IS_ERR(path_fault))
38120 + path_fault = "<path too long>";
38121 + else {
38122 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
38123 + if (path_fault) {
38124 + *path_fault = 0;
38125 + path_fault = buffer_fault;
38126 + } else
38127 + path_fault = "<path too long>";
38128 + }
38129 + } else
38130 + path_fault = "<anonymous mapping>";
38131 + }
38132 + up_read(&mm->mmap_sem);
38133 + }
38134 + if (tsk->signal->curr_ip)
38135 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
38136 + else
38137 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
38138 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
38139 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
38140 + task_uid(tsk), task_euid(tsk), pc, sp);
38141 + free_page((unsigned long)buffer_exec);
38142 + free_page((unsigned long)buffer_fault);
38143 + pax_report_insns(pc, sp);
38144 + do_coredump(SIGKILL, SIGKILL, regs);
38145 +}
38146 +#endif
38147 +
38148 +#ifdef CONFIG_PAX_REFCOUNT
38149 +void pax_report_refcount_overflow(struct pt_regs *regs)
38150 +{
38151 + if (current->signal->curr_ip)
38152 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38153 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
38154 + else
38155 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38156 + current->comm, task_pid_nr(current), current_uid(), current_euid());
38157 +	print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
38158 + show_regs(regs);
38159 + force_sig_specific(SIGKILL, current);
38160 +}
38161 +#endif
38162 +
38163 +#ifdef CONFIG_PAX_USERCOPY
38164 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
38165 +int object_is_on_stack(const void *obj, unsigned long len)
38166 +{
38167 + const void * const stack = task_stack_page(current);
38168 + const void * const stackend = stack + THREAD_SIZE;
38169 +
38170 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38171 + const void *frame = NULL;
38172 + const void *oldframe;
38173 +#endif
38174 +
38175 + if (obj + len < obj)
38176 + return -1;
38177 +
38178 + if (obj + len <= stack || stackend <= obj)
38179 + return 0;
38180 +
38181 + if (obj < stack || stackend < obj + len)
38182 + return -1;
38183 +
38184 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38185 + oldframe = __builtin_frame_address(1);
38186 + if (oldframe)
38187 + frame = __builtin_frame_address(2);
38188 + /*
38189 + low ----------------------------------------------> high
38190 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
38191 + ^----------------^
38192 + allow copies only within here
38193 + */
38194 + while (stack <= frame && frame < stackend) {
38195 + /* if obj + len extends past the last frame, this
38196 + check won't pass and the next frame will be 0,
38197 + causing us to bail out and correctly report
38198 + the copy as invalid
38199 + */
38200 + if (obj + len <= frame)
38201 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
38202 + oldframe = frame;
38203 + frame = *(const void * const *)frame;
38204 + }
38205 + return -1;
38206 +#else
38207 + return 1;
38208 +#endif
38209 +}
38210 +
38211 +
38212 +void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
38213 +{
38214 + if (current->signal->curr_ip)
38215 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38216 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38217 + else
38218 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38219 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38220 +
38221 + dump_stack();
38222 + gr_handle_kernel_exploit();
38223 + do_group_exit(SIGKILL);
38224 +}
38225 +#endif
38226 +
38227 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
38228 +void pax_track_stack(void)
38229 +{
38230 + unsigned long sp = (unsigned long)&sp;
38231 + if (sp < current_thread_info()->lowest_stack &&
38232 + sp > (unsigned long)task_stack_page(current))
38233 + current_thread_info()->lowest_stack = sp;
38234 +}
38235 +EXPORT_SYMBOL(pax_track_stack);
38236 +#endif
38237 +
38238 static int zap_process(struct task_struct *start)
38239 {
38240 struct task_struct *t;
38241 @@ -1793,17 +2097,17 @@ static void wait_for_dump_helpers(struct
38242 pipe = file->f_path.dentry->d_inode->i_pipe;
38243
38244 pipe_lock(pipe);
38245 - pipe->readers++;
38246 - pipe->writers--;
38247 + atomic_inc(&pipe->readers);
38248 + atomic_dec(&pipe->writers);
38249
38250 - while ((pipe->readers > 1) && (!signal_pending(current))) {
38251 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
38252 wake_up_interruptible_sync(&pipe->wait);
38253 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
38254 pipe_wait(pipe);
38255 }
38256
38257 - pipe->readers--;
38258 - pipe->writers++;
38259 + atomic_dec(&pipe->readers);
38260 + atomic_inc(&pipe->writers);
38261 pipe_unlock(pipe);
38262
38263 }
38264 @@ -1826,10 +2130,13 @@ void do_coredump(long signr, int exit_co
38265 char **helper_argv = NULL;
38266 int helper_argc = 0;
38267 int dump_count = 0;
38268 - static atomic_t core_dump_count = ATOMIC_INIT(0);
38269 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
38270
38271 audit_core_dumps(signr);
38272
38273 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
38274 + gr_handle_brute_attach(current, mm->flags);
38275 +
38276 binfmt = mm->binfmt;
38277 if (!binfmt || !binfmt->core_dump)
38278 goto fail;
38279 @@ -1874,6 +2181,8 @@ void do_coredump(long signr, int exit_co
38280 */
38281 clear_thread_flag(TIF_SIGPENDING);
38282
38283 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
38284 +
38285 /*
38286 * lock_kernel() because format_corename() is controlled by sysctl, which
38287 * uses lock_kernel()
38288 @@ -1908,7 +2217,7 @@ void do_coredump(long signr, int exit_co
38289 goto fail_unlock;
38290 }
38291
38292 - dump_count = atomic_inc_return(&core_dump_count);
38293 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
38294 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
38295 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
38296 task_tgid_vnr(current), current->comm);
38297 @@ -1972,7 +2281,7 @@ close_fail:
38298 filp_close(file, NULL);
38299 fail_dropcount:
38300 if (dump_count)
38301 - atomic_dec(&core_dump_count);
38302 + atomic_dec_unchecked(&core_dump_count);
38303 fail_unlock:
38304 if (helper_argv)
38305 argv_free(helper_argv);
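
Among the fs/exec.c additions above, the CONFIG_PAX_USERCOPY helper object_is_on_stack() classifies a candidate copy buffer as off the kernel stack, fully on it, or straddling a boundary, and with frame pointers it further restricts the copy to the live frame window. A simplified, runnable userspace sketch of just the containment logic; the frame walk is omitted and the names are invented for the example:

#include <stdio.h>

/* 0 = not in the range at all, 1 = fully inside, -1 = straddles an edge
 * (which the usercopy check treats as an error). */
static int object_is_in_range(const char *obj, unsigned long len,
			      const char *start, const char *end)
{
	if (obj + len < obj)                  /* length wraps around     */
		return -1;
	if (obj + len <= start || end <= obj) /* entirely outside        */
		return 0;
	if (obj < start || end < obj + len)   /* partial overlap: reject */
		return -1;
	return 1;                             /* fully contained         */
}

int main(void)
{
	static char arena[512];               /* stand-in for the kernel stack pages */
	const char *stack = arena + 128, *stackend = arena + 384;

	printf("inside:   %d\n", object_is_in_range(stack + 16, 32, stack, stackend));
	printf("outside:  %d\n", object_is_in_range(arena, 64, stack, stackend));
	printf("straddle: %d\n", object_is_in_range(stackend - 8, 32, stack, stackend));
	return 0;
}

The -1 result is what lets the usercopy check reject copies that only partially overlap the stack, the case the comment in the original helper marks as an error.
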
38306 diff -urNp linux-2.6.32.42/fs/ext2/balloc.c linux-2.6.32.42/fs/ext2/balloc.c
38307 --- linux-2.6.32.42/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
38308 +++ linux-2.6.32.42/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
38309 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
38310
38311 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38312 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38313 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38314 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38315 sbi->s_resuid != current_fsuid() &&
38316 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38317 return 0;
38318 diff -urNp linux-2.6.32.42/fs/ext3/balloc.c linux-2.6.32.42/fs/ext3/balloc.c
38319 --- linux-2.6.32.42/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
38320 +++ linux-2.6.32.42/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
38321 @@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
38322
38323 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38324 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38325 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38326 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38327 sbi->s_resuid != current_fsuid() &&
38328 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38329 return 0;
38330 diff -urNp linux-2.6.32.42/fs/ext4/balloc.c linux-2.6.32.42/fs/ext4/balloc.c
38331 --- linux-2.6.32.42/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
38332 +++ linux-2.6.32.42/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
38333 @@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
38334 /* Hm, nope. Are (enough) root reserved blocks available? */
38335 if (sbi->s_resuid == current_fsuid() ||
38336 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
38337 - capable(CAP_SYS_RESOURCE)) {
38338 + capable_nolog(CAP_SYS_RESOURCE)) {
38339 if (free_blocks >= (nblocks + dirty_blocks))
38340 return 1;
38341 }
38342 diff -urNp linux-2.6.32.42/fs/ext4/ext4.h linux-2.6.32.42/fs/ext4/ext4.h
38343 --- linux-2.6.32.42/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
38344 +++ linux-2.6.32.42/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
38345 @@ -1078,19 +1078,19 @@ struct ext4_sb_info {
38346
38347 /* stats for buddy allocator */
38348 spinlock_t s_mb_pa_lock;
38349 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
38350 - atomic_t s_bal_success; /* we found long enough chunks */
38351 - atomic_t s_bal_allocated; /* in blocks */
38352 - atomic_t s_bal_ex_scanned; /* total extents scanned */
38353 - atomic_t s_bal_goals; /* goal hits */
38354 - atomic_t s_bal_breaks; /* too long searches */
38355 - atomic_t s_bal_2orders; /* 2^order hits */
38356 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
38357 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
38358 + atomic_unchecked_t s_bal_allocated; /* in blocks */
38359 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
38360 + atomic_unchecked_t s_bal_goals; /* goal hits */
38361 + atomic_unchecked_t s_bal_breaks; /* too long searches */
38362 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
38363 spinlock_t s_bal_lock;
38364 unsigned long s_mb_buddies_generated;
38365 unsigned long long s_mb_generation_time;
38366 - atomic_t s_mb_lost_chunks;
38367 - atomic_t s_mb_preallocated;
38368 - atomic_t s_mb_discarded;
38369 + atomic_unchecked_t s_mb_lost_chunks;
38370 + atomic_unchecked_t s_mb_preallocated;
38371 + atomic_unchecked_t s_mb_discarded;
38372 atomic_t s_lock_busy;
38373
38374 /* locality groups */
38375 diff -urNp linux-2.6.32.42/fs/ext4/mballoc.c linux-2.6.32.42/fs/ext4/mballoc.c
38376 --- linux-2.6.32.42/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
38377 +++ linux-2.6.32.42/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
38378 @@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
38379 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
38380
38381 if (EXT4_SB(sb)->s_mb_stats)
38382 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
38383 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
38384
38385 break;
38386 }
38387 @@ -2131,7 +2131,7 @@ repeat:
38388 ac->ac_status = AC_STATUS_CONTINUE;
38389 ac->ac_flags |= EXT4_MB_HINT_FIRST;
38390 cr = 3;
38391 - atomic_inc(&sbi->s_mb_lost_chunks);
38392 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
38393 goto repeat;
38394 }
38395 }
38396 @@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
38397 ext4_grpblk_t counters[16];
38398 } sg;
38399
38400 + pax_track_stack();
38401 +
38402 group--;
38403 if (group == 0)
38404 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
38405 @@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
38406 if (sbi->s_mb_stats) {
38407 printk(KERN_INFO
38408 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
38409 - atomic_read(&sbi->s_bal_allocated),
38410 - atomic_read(&sbi->s_bal_reqs),
38411 - atomic_read(&sbi->s_bal_success));
38412 + atomic_read_unchecked(&sbi->s_bal_allocated),
38413 + atomic_read_unchecked(&sbi->s_bal_reqs),
38414 + atomic_read_unchecked(&sbi->s_bal_success));
38415 printk(KERN_INFO
38416 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
38417 "%u 2^N hits, %u breaks, %u lost\n",
38418 - atomic_read(&sbi->s_bal_ex_scanned),
38419 - atomic_read(&sbi->s_bal_goals),
38420 - atomic_read(&sbi->s_bal_2orders),
38421 - atomic_read(&sbi->s_bal_breaks),
38422 - atomic_read(&sbi->s_mb_lost_chunks));
38423 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
38424 + atomic_read_unchecked(&sbi->s_bal_goals),
38425 + atomic_read_unchecked(&sbi->s_bal_2orders),
38426 + atomic_read_unchecked(&sbi->s_bal_breaks),
38427 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
38428 printk(KERN_INFO
38429 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
38430 sbi->s_mb_buddies_generated++,
38431 sbi->s_mb_generation_time);
38432 printk(KERN_INFO
38433 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
38434 - atomic_read(&sbi->s_mb_preallocated),
38435 - atomic_read(&sbi->s_mb_discarded));
38436 + atomic_read_unchecked(&sbi->s_mb_preallocated),
38437 + atomic_read_unchecked(&sbi->s_mb_discarded));
38438 }
38439
38440 free_percpu(sbi->s_locality_groups);
38441 @@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
38442 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
38443
38444 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
38445 - atomic_inc(&sbi->s_bal_reqs);
38446 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
38447 + atomic_inc_unchecked(&sbi->s_bal_reqs);
38448 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
38449 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
38450 - atomic_inc(&sbi->s_bal_success);
38451 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
38452 + atomic_inc_unchecked(&sbi->s_bal_success);
38453 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
38454 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
38455 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
38456 - atomic_inc(&sbi->s_bal_goals);
38457 + atomic_inc_unchecked(&sbi->s_bal_goals);
38458 if (ac->ac_found > sbi->s_mb_max_to_scan)
38459 - atomic_inc(&sbi->s_bal_breaks);
38460 + atomic_inc_unchecked(&sbi->s_bal_breaks);
38461 }
38462
38463 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
38464 @@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
38465 trace_ext4_mb_new_inode_pa(ac, pa);
38466
38467 ext4_mb_use_inode_pa(ac, pa);
38468 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38469 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38470
38471 ei = EXT4_I(ac->ac_inode);
38472 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
38473 @@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
38474 trace_ext4_mb_new_group_pa(ac, pa);
38475
38476 ext4_mb_use_group_pa(ac, pa);
38477 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38478 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
38479
38480 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
38481 lg = ac->ac_lg;
38482 @@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
38483 * from the bitmap and continue.
38484 */
38485 }
38486 - atomic_add(free, &sbi->s_mb_discarded);
38487 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
38488
38489 return err;
38490 }
38491 @@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
38492 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
38493 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
38494 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
38495 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
38496 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
38497
38498 if (ac) {
38499 ac->ac_sb = sb;
38500 diff -urNp linux-2.6.32.42/fs/ext4/super.c linux-2.6.32.42/fs/ext4/super.c
38501 --- linux-2.6.32.42/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
38502 +++ linux-2.6.32.42/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
38503 @@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
38504 }
38505
38506
38507 -static struct sysfs_ops ext4_attr_ops = {
38508 +static const struct sysfs_ops ext4_attr_ops = {
38509 .show = ext4_attr_show,
38510 .store = ext4_attr_store,
38511 };
38512 diff -urNp linux-2.6.32.42/fs/fcntl.c linux-2.6.32.42/fs/fcntl.c
38513 --- linux-2.6.32.42/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
38514 +++ linux-2.6.32.42/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
38515 @@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
38516 if (err)
38517 return err;
38518
38519 + if (gr_handle_chroot_fowner(pid, type))
38520 + return -ENOENT;
38521 + if (gr_check_protected_task_fowner(pid, type))
38522 + return -EACCES;
38523 +
38524 f_modown(filp, pid, type, force);
38525 return 0;
38526 }
38527 @@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
38528 switch (cmd) {
38529 case F_DUPFD:
38530 case F_DUPFD_CLOEXEC:
38531 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
38532 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
38533 break;
38534 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
38535 diff -urNp linux-2.6.32.42/fs/fifo.c linux-2.6.32.42/fs/fifo.c
38536 --- linux-2.6.32.42/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
38537 +++ linux-2.6.32.42/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
38538 @@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
38539 */
38540 filp->f_op = &read_pipefifo_fops;
38541 pipe->r_counter++;
38542 - if (pipe->readers++ == 0)
38543 + if (atomic_inc_return(&pipe->readers) == 1)
38544 wake_up_partner(inode);
38545
38546 - if (!pipe->writers) {
38547 + if (!atomic_read(&pipe->writers)) {
38548 if ((filp->f_flags & O_NONBLOCK)) {
38549 /* suppress POLLHUP until we have
38550 * seen a writer */
38551 @@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
38552 * errno=ENXIO when there is no process reading the FIFO.
38553 */
38554 ret = -ENXIO;
38555 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
38556 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
38557 goto err;
38558
38559 filp->f_op = &write_pipefifo_fops;
38560 pipe->w_counter++;
38561 - if (!pipe->writers++)
38562 + if (atomic_inc_return(&pipe->writers) == 1)
38563 wake_up_partner(inode);
38564
38565 - if (!pipe->readers) {
38566 + if (!atomic_read(&pipe->readers)) {
38567 wait_for_partner(inode, &pipe->r_counter);
38568 if (signal_pending(current))
38569 goto err_wr;
38570 @@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
38571 */
38572 filp->f_op = &rdwr_pipefifo_fops;
38573
38574 - pipe->readers++;
38575 - pipe->writers++;
38576 + atomic_inc(&pipe->readers);
38577 + atomic_inc(&pipe->writers);
38578 pipe->r_counter++;
38579 pipe->w_counter++;
38580 - if (pipe->readers == 1 || pipe->writers == 1)
38581 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
38582 wake_up_partner(inode);
38583 break;
38584
38585 @@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
38586 return 0;
38587
38588 err_rd:
38589 - if (!--pipe->readers)
38590 + if (atomic_dec_and_test(&pipe->readers))
38591 wake_up_interruptible(&pipe->wait);
38592 ret = -ERESTARTSYS;
38593 goto err;
38594
38595 err_wr:
38596 - if (!--pipe->writers)
38597 + if (atomic_dec_and_test(&pipe->writers))
38598 wake_up_interruptible(&pipe->wait);
38599 ret = -ERESTARTSYS;
38600 goto err;
38601
38602 err:
38603 - if (!pipe->readers && !pipe->writers)
38604 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
38605 free_pipe_info(inode);
38606
38607 err_nocleanup:
38608 diff -urNp linux-2.6.32.42/fs/file.c linux-2.6.32.42/fs/file.c
38609 --- linux-2.6.32.42/fs/file.c 2011-03-27 14:31:47.000000000 -0400
38610 +++ linux-2.6.32.42/fs/file.c 2011-04-17 15:56:46.000000000 -0400
38611 @@ -14,6 +14,7 @@
38612 #include <linux/slab.h>
38613 #include <linux/vmalloc.h>
38614 #include <linux/file.h>
38615 +#include <linux/security.h>
38616 #include <linux/fdtable.h>
38617 #include <linux/bitops.h>
38618 #include <linux/interrupt.h>
38619 @@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
38620 * N.B. For clone tasks sharing a files structure, this test
38621 * will limit the total number of files that can be opened.
38622 */
38623 +
38624 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
38625 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
38626 return -EMFILE;
38627
38628 diff -urNp linux-2.6.32.42/fs/filesystems.c linux-2.6.32.42/fs/filesystems.c
38629 --- linux-2.6.32.42/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
38630 +++ linux-2.6.32.42/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
38631 @@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
38632 int len = dot ? dot - name : strlen(name);
38633
38634 fs = __get_fs_type(name, len);
38635 +
38636 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
38637 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
38638 +#else
38639 if (!fs && (request_module("%.*s", len, name) == 0))
38640 +#endif
38641 fs = __get_fs_type(name, len);
38642
38643 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
38644 diff -urNp linux-2.6.32.42/fs/fscache/cookie.c linux-2.6.32.42/fs/fscache/cookie.c
38645 --- linux-2.6.32.42/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
38646 +++ linux-2.6.32.42/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
38647 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
38648 parent ? (char *) parent->def->name : "<no-parent>",
38649 def->name, netfs_data);
38650
38651 - fscache_stat(&fscache_n_acquires);
38652 + fscache_stat_unchecked(&fscache_n_acquires);
38653
38654 /* if there's no parent cookie, then we don't create one here either */
38655 if (!parent) {
38656 - fscache_stat(&fscache_n_acquires_null);
38657 + fscache_stat_unchecked(&fscache_n_acquires_null);
38658 _leave(" [no parent]");
38659 return NULL;
38660 }
38661 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
38662 /* allocate and initialise a cookie */
38663 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
38664 if (!cookie) {
38665 - fscache_stat(&fscache_n_acquires_oom);
38666 + fscache_stat_unchecked(&fscache_n_acquires_oom);
38667 _leave(" [ENOMEM]");
38668 return NULL;
38669 }
38670 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
38671
38672 switch (cookie->def->type) {
38673 case FSCACHE_COOKIE_TYPE_INDEX:
38674 - fscache_stat(&fscache_n_cookie_index);
38675 + fscache_stat_unchecked(&fscache_n_cookie_index);
38676 break;
38677 case FSCACHE_COOKIE_TYPE_DATAFILE:
38678 - fscache_stat(&fscache_n_cookie_data);
38679 + fscache_stat_unchecked(&fscache_n_cookie_data);
38680 break;
38681 default:
38682 - fscache_stat(&fscache_n_cookie_special);
38683 + fscache_stat_unchecked(&fscache_n_cookie_special);
38684 break;
38685 }
38686
38687 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
38688 if (fscache_acquire_non_index_cookie(cookie) < 0) {
38689 atomic_dec(&parent->n_children);
38690 __fscache_cookie_put(cookie);
38691 - fscache_stat(&fscache_n_acquires_nobufs);
38692 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
38693 _leave(" = NULL");
38694 return NULL;
38695 }
38696 }
38697
38698 - fscache_stat(&fscache_n_acquires_ok);
38699 + fscache_stat_unchecked(&fscache_n_acquires_ok);
38700 _leave(" = %p", cookie);
38701 return cookie;
38702 }
38703 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
38704 cache = fscache_select_cache_for_object(cookie->parent);
38705 if (!cache) {
38706 up_read(&fscache_addremove_sem);
38707 - fscache_stat(&fscache_n_acquires_no_cache);
38708 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
38709 _leave(" = -ENOMEDIUM [no cache]");
38710 return -ENOMEDIUM;
38711 }
38712 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
38713 object = cache->ops->alloc_object(cache, cookie);
38714 fscache_stat_d(&fscache_n_cop_alloc_object);
38715 if (IS_ERR(object)) {
38716 - fscache_stat(&fscache_n_object_no_alloc);
38717 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
38718 ret = PTR_ERR(object);
38719 goto error;
38720 }
38721
38722 - fscache_stat(&fscache_n_object_alloc);
38723 + fscache_stat_unchecked(&fscache_n_object_alloc);
38724
38725 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
38726
38727 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
38728 struct fscache_object *object;
38729 struct hlist_node *_p;
38730
38731 - fscache_stat(&fscache_n_updates);
38732 + fscache_stat_unchecked(&fscache_n_updates);
38733
38734 if (!cookie) {
38735 - fscache_stat(&fscache_n_updates_null);
38736 + fscache_stat_unchecked(&fscache_n_updates_null);
38737 _leave(" [no cookie]");
38738 return;
38739 }
38740 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
38741 struct fscache_object *object;
38742 unsigned long event;
38743
38744 - fscache_stat(&fscache_n_relinquishes);
38745 + fscache_stat_unchecked(&fscache_n_relinquishes);
38746 if (retire)
38747 - fscache_stat(&fscache_n_relinquishes_retire);
38748 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
38749
38750 if (!cookie) {
38751 - fscache_stat(&fscache_n_relinquishes_null);
38752 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
38753 _leave(" [no cookie]");
38754 return;
38755 }
38756 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
38757
38758 /* wait for the cookie to finish being instantiated (or to fail) */
38759 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
38760 - fscache_stat(&fscache_n_relinquishes_waitcrt);
38761 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
38762 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
38763 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
38764 }
38765 diff -urNp linux-2.6.32.42/fs/fscache/internal.h linux-2.6.32.42/fs/fscache/internal.h
38766 --- linux-2.6.32.42/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
38767 +++ linux-2.6.32.42/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
38768 @@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
38769 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
38770 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
38771
38772 -extern atomic_t fscache_n_op_pend;
38773 -extern atomic_t fscache_n_op_run;
38774 -extern atomic_t fscache_n_op_enqueue;
38775 -extern atomic_t fscache_n_op_deferred_release;
38776 -extern atomic_t fscache_n_op_release;
38777 -extern atomic_t fscache_n_op_gc;
38778 -extern atomic_t fscache_n_op_cancelled;
38779 -extern atomic_t fscache_n_op_rejected;
38780 -
38781 -extern atomic_t fscache_n_attr_changed;
38782 -extern atomic_t fscache_n_attr_changed_ok;
38783 -extern atomic_t fscache_n_attr_changed_nobufs;
38784 -extern atomic_t fscache_n_attr_changed_nomem;
38785 -extern atomic_t fscache_n_attr_changed_calls;
38786 -
38787 -extern atomic_t fscache_n_allocs;
38788 -extern atomic_t fscache_n_allocs_ok;
38789 -extern atomic_t fscache_n_allocs_wait;
38790 -extern atomic_t fscache_n_allocs_nobufs;
38791 -extern atomic_t fscache_n_allocs_intr;
38792 -extern atomic_t fscache_n_allocs_object_dead;
38793 -extern atomic_t fscache_n_alloc_ops;
38794 -extern atomic_t fscache_n_alloc_op_waits;
38795 -
38796 -extern atomic_t fscache_n_retrievals;
38797 -extern atomic_t fscache_n_retrievals_ok;
38798 -extern atomic_t fscache_n_retrievals_wait;
38799 -extern atomic_t fscache_n_retrievals_nodata;
38800 -extern atomic_t fscache_n_retrievals_nobufs;
38801 -extern atomic_t fscache_n_retrievals_intr;
38802 -extern atomic_t fscache_n_retrievals_nomem;
38803 -extern atomic_t fscache_n_retrievals_object_dead;
38804 -extern atomic_t fscache_n_retrieval_ops;
38805 -extern atomic_t fscache_n_retrieval_op_waits;
38806 -
38807 -extern atomic_t fscache_n_stores;
38808 -extern atomic_t fscache_n_stores_ok;
38809 -extern atomic_t fscache_n_stores_again;
38810 -extern atomic_t fscache_n_stores_nobufs;
38811 -extern atomic_t fscache_n_stores_oom;
38812 -extern atomic_t fscache_n_store_ops;
38813 -extern atomic_t fscache_n_store_calls;
38814 -extern atomic_t fscache_n_store_pages;
38815 -extern atomic_t fscache_n_store_radix_deletes;
38816 -extern atomic_t fscache_n_store_pages_over_limit;
38817 -
38818 -extern atomic_t fscache_n_store_vmscan_not_storing;
38819 -extern atomic_t fscache_n_store_vmscan_gone;
38820 -extern atomic_t fscache_n_store_vmscan_busy;
38821 -extern atomic_t fscache_n_store_vmscan_cancelled;
38822 -
38823 -extern atomic_t fscache_n_marks;
38824 -extern atomic_t fscache_n_uncaches;
38825 -
38826 -extern atomic_t fscache_n_acquires;
38827 -extern atomic_t fscache_n_acquires_null;
38828 -extern atomic_t fscache_n_acquires_no_cache;
38829 -extern atomic_t fscache_n_acquires_ok;
38830 -extern atomic_t fscache_n_acquires_nobufs;
38831 -extern atomic_t fscache_n_acquires_oom;
38832 -
38833 -extern atomic_t fscache_n_updates;
38834 -extern atomic_t fscache_n_updates_null;
38835 -extern atomic_t fscache_n_updates_run;
38836 -
38837 -extern atomic_t fscache_n_relinquishes;
38838 -extern atomic_t fscache_n_relinquishes_null;
38839 -extern atomic_t fscache_n_relinquishes_waitcrt;
38840 -extern atomic_t fscache_n_relinquishes_retire;
38841 -
38842 -extern atomic_t fscache_n_cookie_index;
38843 -extern atomic_t fscache_n_cookie_data;
38844 -extern atomic_t fscache_n_cookie_special;
38845 -
38846 -extern atomic_t fscache_n_object_alloc;
38847 -extern atomic_t fscache_n_object_no_alloc;
38848 -extern atomic_t fscache_n_object_lookups;
38849 -extern atomic_t fscache_n_object_lookups_negative;
38850 -extern atomic_t fscache_n_object_lookups_positive;
38851 -extern atomic_t fscache_n_object_lookups_timed_out;
38852 -extern atomic_t fscache_n_object_created;
38853 -extern atomic_t fscache_n_object_avail;
38854 -extern atomic_t fscache_n_object_dead;
38855 -
38856 -extern atomic_t fscache_n_checkaux_none;
38857 -extern atomic_t fscache_n_checkaux_okay;
38858 -extern atomic_t fscache_n_checkaux_update;
38859 -extern atomic_t fscache_n_checkaux_obsolete;
38860 +extern atomic_unchecked_t fscache_n_op_pend;
38861 +extern atomic_unchecked_t fscache_n_op_run;
38862 +extern atomic_unchecked_t fscache_n_op_enqueue;
38863 +extern atomic_unchecked_t fscache_n_op_deferred_release;
38864 +extern atomic_unchecked_t fscache_n_op_release;
38865 +extern atomic_unchecked_t fscache_n_op_gc;
38866 +extern atomic_unchecked_t fscache_n_op_cancelled;
38867 +extern atomic_unchecked_t fscache_n_op_rejected;
38868 +
38869 +extern atomic_unchecked_t fscache_n_attr_changed;
38870 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
38871 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
38872 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
38873 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
38874 +
38875 +extern atomic_unchecked_t fscache_n_allocs;
38876 +extern atomic_unchecked_t fscache_n_allocs_ok;
38877 +extern atomic_unchecked_t fscache_n_allocs_wait;
38878 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
38879 +extern atomic_unchecked_t fscache_n_allocs_intr;
38880 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
38881 +extern atomic_unchecked_t fscache_n_alloc_ops;
38882 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
38883 +
38884 +extern atomic_unchecked_t fscache_n_retrievals;
38885 +extern atomic_unchecked_t fscache_n_retrievals_ok;
38886 +extern atomic_unchecked_t fscache_n_retrievals_wait;
38887 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
38888 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
38889 +extern atomic_unchecked_t fscache_n_retrievals_intr;
38890 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
38891 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
38892 +extern atomic_unchecked_t fscache_n_retrieval_ops;
38893 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
38894 +
38895 +extern atomic_unchecked_t fscache_n_stores;
38896 +extern atomic_unchecked_t fscache_n_stores_ok;
38897 +extern atomic_unchecked_t fscache_n_stores_again;
38898 +extern atomic_unchecked_t fscache_n_stores_nobufs;
38899 +extern atomic_unchecked_t fscache_n_stores_oom;
38900 +extern atomic_unchecked_t fscache_n_store_ops;
38901 +extern atomic_unchecked_t fscache_n_store_calls;
38902 +extern atomic_unchecked_t fscache_n_store_pages;
38903 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
38904 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
38905 +
38906 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
38907 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
38908 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
38909 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
38910 +
38911 +extern atomic_unchecked_t fscache_n_marks;
38912 +extern atomic_unchecked_t fscache_n_uncaches;
38913 +
38914 +extern atomic_unchecked_t fscache_n_acquires;
38915 +extern atomic_unchecked_t fscache_n_acquires_null;
38916 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
38917 +extern atomic_unchecked_t fscache_n_acquires_ok;
38918 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
38919 +extern atomic_unchecked_t fscache_n_acquires_oom;
38920 +
38921 +extern atomic_unchecked_t fscache_n_updates;
38922 +extern atomic_unchecked_t fscache_n_updates_null;
38923 +extern atomic_unchecked_t fscache_n_updates_run;
38924 +
38925 +extern atomic_unchecked_t fscache_n_relinquishes;
38926 +extern atomic_unchecked_t fscache_n_relinquishes_null;
38927 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
38928 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
38929 +
38930 +extern atomic_unchecked_t fscache_n_cookie_index;
38931 +extern atomic_unchecked_t fscache_n_cookie_data;
38932 +extern atomic_unchecked_t fscache_n_cookie_special;
38933 +
38934 +extern atomic_unchecked_t fscache_n_object_alloc;
38935 +extern atomic_unchecked_t fscache_n_object_no_alloc;
38936 +extern atomic_unchecked_t fscache_n_object_lookups;
38937 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
38938 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
38939 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
38940 +extern atomic_unchecked_t fscache_n_object_created;
38941 +extern atomic_unchecked_t fscache_n_object_avail;
38942 +extern atomic_unchecked_t fscache_n_object_dead;
38943 +
38944 +extern atomic_unchecked_t fscache_n_checkaux_none;
38945 +extern atomic_unchecked_t fscache_n_checkaux_okay;
38946 +extern atomic_unchecked_t fscache_n_checkaux_update;
38947 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
38948
38949 extern atomic_t fscache_n_cop_alloc_object;
38950 extern atomic_t fscache_n_cop_lookup_object;
38951 @@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
38952 atomic_inc(stat);
38953 }
38954
38955 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
38956 +{
38957 + atomic_inc_unchecked(stat);
38958 +}
38959 +
38960 static inline void fscache_stat_d(atomic_t *stat)
38961 {
38962 atomic_dec(stat);
38963 @@ -259,6 +264,7 @@ extern const struct file_operations fsca
38964
38965 #define __fscache_stat(stat) (NULL)
38966 #define fscache_stat(stat) do {} while (0)
38967 +#define fscache_stat_unchecked(stat) do {} while (0)
38968 #define fscache_stat_d(stat) do {} while (0)
38969 #endif
38970
38971 diff -urNp linux-2.6.32.42/fs/fscache/object.c linux-2.6.32.42/fs/fscache/object.c
38972 --- linux-2.6.32.42/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
38973 +++ linux-2.6.32.42/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
38974 @@ -144,7 +144,7 @@ static void fscache_object_state_machine
38975 /* update the object metadata on disk */
38976 case FSCACHE_OBJECT_UPDATING:
38977 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
38978 - fscache_stat(&fscache_n_updates_run);
38979 + fscache_stat_unchecked(&fscache_n_updates_run);
38980 fscache_stat(&fscache_n_cop_update_object);
38981 object->cache->ops->update_object(object);
38982 fscache_stat_d(&fscache_n_cop_update_object);
38983 @@ -233,7 +233,7 @@ static void fscache_object_state_machine
38984 spin_lock(&object->lock);
38985 object->state = FSCACHE_OBJECT_DEAD;
38986 spin_unlock(&object->lock);
38987 - fscache_stat(&fscache_n_object_dead);
38988 + fscache_stat_unchecked(&fscache_n_object_dead);
38989 goto terminal_transit;
38990
38991 /* handle the parent cache of this object being withdrawn from
38992 @@ -248,7 +248,7 @@ static void fscache_object_state_machine
38993 spin_lock(&object->lock);
38994 object->state = FSCACHE_OBJECT_DEAD;
38995 spin_unlock(&object->lock);
38996 - fscache_stat(&fscache_n_object_dead);
38997 + fscache_stat_unchecked(&fscache_n_object_dead);
38998 goto terminal_transit;
38999
39000 /* complain about the object being woken up once it is
39001 @@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
39002 parent->cookie->def->name, cookie->def->name,
39003 object->cache->tag->name);
39004
39005 - fscache_stat(&fscache_n_object_lookups);
39006 + fscache_stat_unchecked(&fscache_n_object_lookups);
39007 fscache_stat(&fscache_n_cop_lookup_object);
39008 ret = object->cache->ops->lookup_object(object);
39009 fscache_stat_d(&fscache_n_cop_lookup_object);
39010 @@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
39011 if (ret == -ETIMEDOUT) {
39012 /* probably stuck behind another object, so move this one to
39013 * the back of the queue */
39014 - fscache_stat(&fscache_n_object_lookups_timed_out);
39015 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
39016 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39017 }
39018
39019 @@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
39020
39021 spin_lock(&object->lock);
39022 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39023 - fscache_stat(&fscache_n_object_lookups_negative);
39024 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
39025
39026 /* transit here to allow write requests to begin stacking up
39027 * and read requests to begin returning ENODATA */
39028 @@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
39029 * result, in which case there may be data available */
39030 spin_lock(&object->lock);
39031 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39032 - fscache_stat(&fscache_n_object_lookups_positive);
39033 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
39034
39035 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
39036
39037 @@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
39038 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39039 } else {
39040 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
39041 - fscache_stat(&fscache_n_object_created);
39042 + fscache_stat_unchecked(&fscache_n_object_created);
39043
39044 object->state = FSCACHE_OBJECT_AVAILABLE;
39045 spin_unlock(&object->lock);
39046 @@ -633,7 +633,7 @@ static void fscache_object_available(str
39047 fscache_enqueue_dependents(object);
39048
39049 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
39050 - fscache_stat(&fscache_n_object_avail);
39051 + fscache_stat_unchecked(&fscache_n_object_avail);
39052
39053 _leave("");
39054 }
39055 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
39056 enum fscache_checkaux result;
39057
39058 if (!object->cookie->def->check_aux) {
39059 - fscache_stat(&fscache_n_checkaux_none);
39060 + fscache_stat_unchecked(&fscache_n_checkaux_none);
39061 return FSCACHE_CHECKAUX_OKAY;
39062 }
39063
39064 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
39065 switch (result) {
39066 /* entry okay as is */
39067 case FSCACHE_CHECKAUX_OKAY:
39068 - fscache_stat(&fscache_n_checkaux_okay);
39069 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
39070 break;
39071
39072 /* entry requires update */
39073 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
39074 - fscache_stat(&fscache_n_checkaux_update);
39075 + fscache_stat_unchecked(&fscache_n_checkaux_update);
39076 break;
39077
39078 /* entry requires deletion */
39079 case FSCACHE_CHECKAUX_OBSOLETE:
39080 - fscache_stat(&fscache_n_checkaux_obsolete);
39081 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
39082 break;
39083
39084 default:
39085 diff -urNp linux-2.6.32.42/fs/fscache/operation.c linux-2.6.32.42/fs/fscache/operation.c
39086 --- linux-2.6.32.42/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
39087 +++ linux-2.6.32.42/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
39088 @@ -16,7 +16,7 @@
39089 #include <linux/seq_file.h>
39090 #include "internal.h"
39091
39092 -atomic_t fscache_op_debug_id;
39093 +atomic_unchecked_t fscache_op_debug_id;
39094 EXPORT_SYMBOL(fscache_op_debug_id);
39095
39096 /**
39097 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
39098 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
39099 ASSERTCMP(atomic_read(&op->usage), >, 0);
39100
39101 - fscache_stat(&fscache_n_op_enqueue);
39102 + fscache_stat_unchecked(&fscache_n_op_enqueue);
39103 switch (op->flags & FSCACHE_OP_TYPE) {
39104 case FSCACHE_OP_FAST:
39105 _debug("queue fast");
39106 @@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
39107 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
39108 if (op->processor)
39109 fscache_enqueue_operation(op);
39110 - fscache_stat(&fscache_n_op_run);
39111 + fscache_stat_unchecked(&fscache_n_op_run);
39112 }
39113
39114 /*
39115 @@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
39116 if (object->n_ops > 0) {
39117 atomic_inc(&op->usage);
39118 list_add_tail(&op->pend_link, &object->pending_ops);
39119 - fscache_stat(&fscache_n_op_pend);
39120 + fscache_stat_unchecked(&fscache_n_op_pend);
39121 } else if (!list_empty(&object->pending_ops)) {
39122 atomic_inc(&op->usage);
39123 list_add_tail(&op->pend_link, &object->pending_ops);
39124 - fscache_stat(&fscache_n_op_pend);
39125 + fscache_stat_unchecked(&fscache_n_op_pend);
39126 fscache_start_operations(object);
39127 } else {
39128 ASSERTCMP(object->n_in_progress, ==, 0);
39129 @@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
39130 object->n_exclusive++; /* reads and writes must wait */
39131 atomic_inc(&op->usage);
39132 list_add_tail(&op->pend_link, &object->pending_ops);
39133 - fscache_stat(&fscache_n_op_pend);
39134 + fscache_stat_unchecked(&fscache_n_op_pend);
39135 ret = 0;
39136 } else {
39137 /* not allowed to submit ops in any other state */
39138 @@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
39139 if (object->n_exclusive > 0) {
39140 atomic_inc(&op->usage);
39141 list_add_tail(&op->pend_link, &object->pending_ops);
39142 - fscache_stat(&fscache_n_op_pend);
39143 + fscache_stat_unchecked(&fscache_n_op_pend);
39144 } else if (!list_empty(&object->pending_ops)) {
39145 atomic_inc(&op->usage);
39146 list_add_tail(&op->pend_link, &object->pending_ops);
39147 - fscache_stat(&fscache_n_op_pend);
39148 + fscache_stat_unchecked(&fscache_n_op_pend);
39149 fscache_start_operations(object);
39150 } else {
39151 ASSERTCMP(object->n_exclusive, ==, 0);
39152 @@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
39153 object->n_ops++;
39154 atomic_inc(&op->usage);
39155 list_add_tail(&op->pend_link, &object->pending_ops);
39156 - fscache_stat(&fscache_n_op_pend);
39157 + fscache_stat_unchecked(&fscache_n_op_pend);
39158 ret = 0;
39159 } else if (object->state == FSCACHE_OBJECT_DYING ||
39160 object->state == FSCACHE_OBJECT_LC_DYING ||
39161 object->state == FSCACHE_OBJECT_WITHDRAWING) {
39162 - fscache_stat(&fscache_n_op_rejected);
39163 + fscache_stat_unchecked(&fscache_n_op_rejected);
39164 ret = -ENOBUFS;
39165 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
39166 fscache_report_unexpected_submission(object, op, ostate);
39167 @@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
39168
39169 ret = -EBUSY;
39170 if (!list_empty(&op->pend_link)) {
39171 - fscache_stat(&fscache_n_op_cancelled);
39172 + fscache_stat_unchecked(&fscache_n_op_cancelled);
39173 list_del_init(&op->pend_link);
39174 object->n_ops--;
39175 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
39176 @@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
39177 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
39178 BUG();
39179
39180 - fscache_stat(&fscache_n_op_release);
39181 + fscache_stat_unchecked(&fscache_n_op_release);
39182
39183 if (op->release) {
39184 op->release(op);
39185 @@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
39186 * lock, and defer it otherwise */
39187 if (!spin_trylock(&object->lock)) {
39188 _debug("defer put");
39189 - fscache_stat(&fscache_n_op_deferred_release);
39190 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
39191
39192 cache = object->cache;
39193 spin_lock(&cache->op_gc_list_lock);
39194 @@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
39195
39196 _debug("GC DEFERRED REL OBJ%x OP%x",
39197 object->debug_id, op->debug_id);
39198 - fscache_stat(&fscache_n_op_gc);
39199 + fscache_stat_unchecked(&fscache_n_op_gc);
39200
39201 ASSERTCMP(atomic_read(&op->usage), ==, 0);
39202
39203 diff -urNp linux-2.6.32.42/fs/fscache/page.c linux-2.6.32.42/fs/fscache/page.c
39204 --- linux-2.6.32.42/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
39205 +++ linux-2.6.32.42/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
39206 @@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
39207 val = radix_tree_lookup(&cookie->stores, page->index);
39208 if (!val) {
39209 rcu_read_unlock();
39210 - fscache_stat(&fscache_n_store_vmscan_not_storing);
39211 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
39212 __fscache_uncache_page(cookie, page);
39213 return true;
39214 }
39215 @@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
39216 spin_unlock(&cookie->stores_lock);
39217
39218 if (xpage) {
39219 - fscache_stat(&fscache_n_store_vmscan_cancelled);
39220 - fscache_stat(&fscache_n_store_radix_deletes);
39221 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
39222 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39223 ASSERTCMP(xpage, ==, page);
39224 } else {
39225 - fscache_stat(&fscache_n_store_vmscan_gone);
39226 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
39227 }
39228
39229 wake_up_bit(&cookie->flags, 0);
39230 @@ -106,7 +106,7 @@ page_busy:
39231 /* we might want to wait here, but that could deadlock the allocator as
39232 * the slow-work threads writing to the cache may all end up sleeping
39233 * on memory allocation */
39234 - fscache_stat(&fscache_n_store_vmscan_busy);
39235 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
39236 return false;
39237 }
39238 EXPORT_SYMBOL(__fscache_maybe_release_page);
39239 @@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
39240 FSCACHE_COOKIE_STORING_TAG);
39241 if (!radix_tree_tag_get(&cookie->stores, page->index,
39242 FSCACHE_COOKIE_PENDING_TAG)) {
39243 - fscache_stat(&fscache_n_store_radix_deletes);
39244 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39245 xpage = radix_tree_delete(&cookie->stores, page->index);
39246 }
39247 spin_unlock(&cookie->stores_lock);
39248 @@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
39249
39250 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
39251
39252 - fscache_stat(&fscache_n_attr_changed_calls);
39253 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
39254
39255 if (fscache_object_is_active(object)) {
39256 fscache_set_op_state(op, "CallFS");
39257 @@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
39258
39259 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39260
39261 - fscache_stat(&fscache_n_attr_changed);
39262 + fscache_stat_unchecked(&fscache_n_attr_changed);
39263
39264 op = kzalloc(sizeof(*op), GFP_KERNEL);
39265 if (!op) {
39266 - fscache_stat(&fscache_n_attr_changed_nomem);
39267 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
39268 _leave(" = -ENOMEM");
39269 return -ENOMEM;
39270 }
39271 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
39272 if (fscache_submit_exclusive_op(object, op) < 0)
39273 goto nobufs;
39274 spin_unlock(&cookie->lock);
39275 - fscache_stat(&fscache_n_attr_changed_ok);
39276 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
39277 fscache_put_operation(op);
39278 _leave(" = 0");
39279 return 0;
39280 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
39281 nobufs:
39282 spin_unlock(&cookie->lock);
39283 kfree(op);
39284 - fscache_stat(&fscache_n_attr_changed_nobufs);
39285 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
39286 _leave(" = %d", -ENOBUFS);
39287 return -ENOBUFS;
39288 }
39289 @@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
39290 /* allocate a retrieval operation and attempt to submit it */
39291 op = kzalloc(sizeof(*op), GFP_NOIO);
39292 if (!op) {
39293 - fscache_stat(&fscache_n_retrievals_nomem);
39294 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39295 return NULL;
39296 }
39297
39298 @@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
39299 return 0;
39300 }
39301
39302 - fscache_stat(&fscache_n_retrievals_wait);
39303 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
39304
39305 jif = jiffies;
39306 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
39307 fscache_wait_bit_interruptible,
39308 TASK_INTERRUPTIBLE) != 0) {
39309 - fscache_stat(&fscache_n_retrievals_intr);
39310 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39311 _leave(" = -ERESTARTSYS");
39312 return -ERESTARTSYS;
39313 }
39314 @@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
39315 */
39316 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
39317 struct fscache_retrieval *op,
39318 - atomic_t *stat_op_waits,
39319 - atomic_t *stat_object_dead)
39320 + atomic_unchecked_t *stat_op_waits,
39321 + atomic_unchecked_t *stat_object_dead)
39322 {
39323 int ret;
39324
39325 @@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
39326 goto check_if_dead;
39327
39328 _debug(">>> WT");
39329 - fscache_stat(stat_op_waits);
39330 + fscache_stat_unchecked(stat_op_waits);
39331 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
39332 fscache_wait_bit_interruptible,
39333 TASK_INTERRUPTIBLE) < 0) {
39334 @@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
39335
39336 check_if_dead:
39337 if (unlikely(fscache_object_is_dead(object))) {
39338 - fscache_stat(stat_object_dead);
39339 + fscache_stat_unchecked(stat_object_dead);
39340 return -ENOBUFS;
39341 }
39342 return 0;
39343 @@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
39344
39345 _enter("%p,%p,,,", cookie, page);
39346
39347 - fscache_stat(&fscache_n_retrievals);
39348 + fscache_stat_unchecked(&fscache_n_retrievals);
39349
39350 if (hlist_empty(&cookie->backing_objects))
39351 goto nobufs;
39352 @@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
39353 goto nobufs_unlock;
39354 spin_unlock(&cookie->lock);
39355
39356 - fscache_stat(&fscache_n_retrieval_ops);
39357 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
39358
39359 /* pin the netfs read context in case we need to do the actual netfs
39360 * read because we've encountered a cache read failure */
39361 @@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
39362
39363 error:
39364 if (ret == -ENOMEM)
39365 - fscache_stat(&fscache_n_retrievals_nomem);
39366 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39367 else if (ret == -ERESTARTSYS)
39368 - fscache_stat(&fscache_n_retrievals_intr);
39369 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39370 else if (ret == -ENODATA)
39371 - fscache_stat(&fscache_n_retrievals_nodata);
39372 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39373 else if (ret < 0)
39374 - fscache_stat(&fscache_n_retrievals_nobufs);
39375 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39376 else
39377 - fscache_stat(&fscache_n_retrievals_ok);
39378 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
39379
39380 fscache_put_retrieval(op);
39381 _leave(" = %d", ret);
39382 @@ -453,7 +453,7 @@ nobufs_unlock:
39383 spin_unlock(&cookie->lock);
39384 kfree(op);
39385 nobufs:
39386 - fscache_stat(&fscache_n_retrievals_nobufs);
39387 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39388 _leave(" = -ENOBUFS");
39389 return -ENOBUFS;
39390 }
39391 @@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
39392
39393 _enter("%p,,%d,,,", cookie, *nr_pages);
39394
39395 - fscache_stat(&fscache_n_retrievals);
39396 + fscache_stat_unchecked(&fscache_n_retrievals);
39397
39398 if (hlist_empty(&cookie->backing_objects))
39399 goto nobufs;
39400 @@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
39401 goto nobufs_unlock;
39402 spin_unlock(&cookie->lock);
39403
39404 - fscache_stat(&fscache_n_retrieval_ops);
39405 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
39406
39407 /* pin the netfs read context in case we need to do the actual netfs
39408 * read because we've encountered a cache read failure */
39409 @@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
39410
39411 error:
39412 if (ret == -ENOMEM)
39413 - fscache_stat(&fscache_n_retrievals_nomem);
39414 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39415 else if (ret == -ERESTARTSYS)
39416 - fscache_stat(&fscache_n_retrievals_intr);
39417 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
39418 else if (ret == -ENODATA)
39419 - fscache_stat(&fscache_n_retrievals_nodata);
39420 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
39421 else if (ret < 0)
39422 - fscache_stat(&fscache_n_retrievals_nobufs);
39423 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39424 else
39425 - fscache_stat(&fscache_n_retrievals_ok);
39426 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
39427
39428 fscache_put_retrieval(op);
39429 _leave(" = %d", ret);
39430 @@ -570,7 +570,7 @@ nobufs_unlock:
39431 spin_unlock(&cookie->lock);
39432 kfree(op);
39433 nobufs:
39434 - fscache_stat(&fscache_n_retrievals_nobufs);
39435 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
39436 _leave(" = -ENOBUFS");
39437 return -ENOBUFS;
39438 }
39439 @@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
39440
39441 _enter("%p,%p,,,", cookie, page);
39442
39443 - fscache_stat(&fscache_n_allocs);
39444 + fscache_stat_unchecked(&fscache_n_allocs);
39445
39446 if (hlist_empty(&cookie->backing_objects))
39447 goto nobufs;
39448 @@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
39449 goto nobufs_unlock;
39450 spin_unlock(&cookie->lock);
39451
39452 - fscache_stat(&fscache_n_alloc_ops);
39453 + fscache_stat_unchecked(&fscache_n_alloc_ops);
39454
39455 ret = fscache_wait_for_retrieval_activation(
39456 object, op,
39457 @@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
39458
39459 error:
39460 if (ret == -ERESTARTSYS)
39461 - fscache_stat(&fscache_n_allocs_intr);
39462 + fscache_stat_unchecked(&fscache_n_allocs_intr);
39463 else if (ret < 0)
39464 - fscache_stat(&fscache_n_allocs_nobufs);
39465 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
39466 else
39467 - fscache_stat(&fscache_n_allocs_ok);
39468 + fscache_stat_unchecked(&fscache_n_allocs_ok);
39469
39470 fscache_put_retrieval(op);
39471 _leave(" = %d", ret);
39472 @@ -651,7 +651,7 @@ nobufs_unlock:
39473 spin_unlock(&cookie->lock);
39474 kfree(op);
39475 nobufs:
39476 - fscache_stat(&fscache_n_allocs_nobufs);
39477 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
39478 _leave(" = -ENOBUFS");
39479 return -ENOBUFS;
39480 }
39481 @@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
39482
39483 spin_lock(&cookie->stores_lock);
39484
39485 - fscache_stat(&fscache_n_store_calls);
39486 + fscache_stat_unchecked(&fscache_n_store_calls);
39487
39488 /* find a page to store */
39489 page = NULL;
39490 @@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
39491 page = results[0];
39492 _debug("gang %d [%lx]", n, page->index);
39493 if (page->index > op->store_limit) {
39494 - fscache_stat(&fscache_n_store_pages_over_limit);
39495 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
39496 goto superseded;
39497 }
39498
39499 @@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
39500
39501 if (page) {
39502 fscache_set_op_state(&op->op, "Store");
39503 - fscache_stat(&fscache_n_store_pages);
39504 + fscache_stat_unchecked(&fscache_n_store_pages);
39505 fscache_stat(&fscache_n_cop_write_page);
39506 ret = object->cache->ops->write_page(op, page);
39507 fscache_stat_d(&fscache_n_cop_write_page);
39508 @@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
39509 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39510 ASSERT(PageFsCache(page));
39511
39512 - fscache_stat(&fscache_n_stores);
39513 + fscache_stat_unchecked(&fscache_n_stores);
39514
39515 op = kzalloc(sizeof(*op), GFP_NOIO);
39516 if (!op)
39517 @@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
39518 spin_unlock(&cookie->stores_lock);
39519 spin_unlock(&object->lock);
39520
39521 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
39522 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
39523 op->store_limit = object->store_limit;
39524
39525 if (fscache_submit_op(object, &op->op) < 0)
39526 @@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
39527
39528 spin_unlock(&cookie->lock);
39529 radix_tree_preload_end();
39530 - fscache_stat(&fscache_n_store_ops);
39531 - fscache_stat(&fscache_n_stores_ok);
39532 + fscache_stat_unchecked(&fscache_n_store_ops);
39533 + fscache_stat_unchecked(&fscache_n_stores_ok);
39534
39535 /* the slow work queue now carries its own ref on the object */
39536 fscache_put_operation(&op->op);
39537 @@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
39538 return 0;
39539
39540 already_queued:
39541 - fscache_stat(&fscache_n_stores_again);
39542 + fscache_stat_unchecked(&fscache_n_stores_again);
39543 already_pending:
39544 spin_unlock(&cookie->stores_lock);
39545 spin_unlock(&object->lock);
39546 spin_unlock(&cookie->lock);
39547 radix_tree_preload_end();
39548 kfree(op);
39549 - fscache_stat(&fscache_n_stores_ok);
39550 + fscache_stat_unchecked(&fscache_n_stores_ok);
39551 _leave(" = 0");
39552 return 0;
39553
39554 @@ -886,14 +886,14 @@ nobufs:
39555 spin_unlock(&cookie->lock);
39556 radix_tree_preload_end();
39557 kfree(op);
39558 - fscache_stat(&fscache_n_stores_nobufs);
39559 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
39560 _leave(" = -ENOBUFS");
39561 return -ENOBUFS;
39562
39563 nomem_free:
39564 kfree(op);
39565 nomem:
39566 - fscache_stat(&fscache_n_stores_oom);
39567 + fscache_stat_unchecked(&fscache_n_stores_oom);
39568 _leave(" = -ENOMEM");
39569 return -ENOMEM;
39570 }
39571 @@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
39572 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39573 ASSERTCMP(page, !=, NULL);
39574
39575 - fscache_stat(&fscache_n_uncaches);
39576 + fscache_stat_unchecked(&fscache_n_uncaches);
39577
39578 /* cache withdrawal may beat us to it */
39579 if (!PageFsCache(page))
39580 @@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
39581 unsigned long loop;
39582
39583 #ifdef CONFIG_FSCACHE_STATS
39584 - atomic_add(pagevec->nr, &fscache_n_marks);
39585 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
39586 #endif
39587
39588 for (loop = 0; loop < pagevec->nr; loop++) {
39589 diff -urNp linux-2.6.32.42/fs/fscache/stats.c linux-2.6.32.42/fs/fscache/stats.c
39590 --- linux-2.6.32.42/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
39591 +++ linux-2.6.32.42/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
39592 @@ -18,95 +18,95 @@
39593 /*
39594 * operation counters
39595 */
39596 -atomic_t fscache_n_op_pend;
39597 -atomic_t fscache_n_op_run;
39598 -atomic_t fscache_n_op_enqueue;
39599 -atomic_t fscache_n_op_requeue;
39600 -atomic_t fscache_n_op_deferred_release;
39601 -atomic_t fscache_n_op_release;
39602 -atomic_t fscache_n_op_gc;
39603 -atomic_t fscache_n_op_cancelled;
39604 -atomic_t fscache_n_op_rejected;
39605 -
39606 -atomic_t fscache_n_attr_changed;
39607 -atomic_t fscache_n_attr_changed_ok;
39608 -atomic_t fscache_n_attr_changed_nobufs;
39609 -atomic_t fscache_n_attr_changed_nomem;
39610 -atomic_t fscache_n_attr_changed_calls;
39611 -
39612 -atomic_t fscache_n_allocs;
39613 -atomic_t fscache_n_allocs_ok;
39614 -atomic_t fscache_n_allocs_wait;
39615 -atomic_t fscache_n_allocs_nobufs;
39616 -atomic_t fscache_n_allocs_intr;
39617 -atomic_t fscache_n_allocs_object_dead;
39618 -atomic_t fscache_n_alloc_ops;
39619 -atomic_t fscache_n_alloc_op_waits;
39620 -
39621 -atomic_t fscache_n_retrievals;
39622 -atomic_t fscache_n_retrievals_ok;
39623 -atomic_t fscache_n_retrievals_wait;
39624 -atomic_t fscache_n_retrievals_nodata;
39625 -atomic_t fscache_n_retrievals_nobufs;
39626 -atomic_t fscache_n_retrievals_intr;
39627 -atomic_t fscache_n_retrievals_nomem;
39628 -atomic_t fscache_n_retrievals_object_dead;
39629 -atomic_t fscache_n_retrieval_ops;
39630 -atomic_t fscache_n_retrieval_op_waits;
39631 -
39632 -atomic_t fscache_n_stores;
39633 -atomic_t fscache_n_stores_ok;
39634 -atomic_t fscache_n_stores_again;
39635 -atomic_t fscache_n_stores_nobufs;
39636 -atomic_t fscache_n_stores_oom;
39637 -atomic_t fscache_n_store_ops;
39638 -atomic_t fscache_n_store_calls;
39639 -atomic_t fscache_n_store_pages;
39640 -atomic_t fscache_n_store_radix_deletes;
39641 -atomic_t fscache_n_store_pages_over_limit;
39642 -
39643 -atomic_t fscache_n_store_vmscan_not_storing;
39644 -atomic_t fscache_n_store_vmscan_gone;
39645 -atomic_t fscache_n_store_vmscan_busy;
39646 -atomic_t fscache_n_store_vmscan_cancelled;
39647 -
39648 -atomic_t fscache_n_marks;
39649 -atomic_t fscache_n_uncaches;
39650 -
39651 -atomic_t fscache_n_acquires;
39652 -atomic_t fscache_n_acquires_null;
39653 -atomic_t fscache_n_acquires_no_cache;
39654 -atomic_t fscache_n_acquires_ok;
39655 -atomic_t fscache_n_acquires_nobufs;
39656 -atomic_t fscache_n_acquires_oom;
39657 -
39658 -atomic_t fscache_n_updates;
39659 -atomic_t fscache_n_updates_null;
39660 -atomic_t fscache_n_updates_run;
39661 -
39662 -atomic_t fscache_n_relinquishes;
39663 -atomic_t fscache_n_relinquishes_null;
39664 -atomic_t fscache_n_relinquishes_waitcrt;
39665 -atomic_t fscache_n_relinquishes_retire;
39666 -
39667 -atomic_t fscache_n_cookie_index;
39668 -atomic_t fscache_n_cookie_data;
39669 -atomic_t fscache_n_cookie_special;
39670 -
39671 -atomic_t fscache_n_object_alloc;
39672 -atomic_t fscache_n_object_no_alloc;
39673 -atomic_t fscache_n_object_lookups;
39674 -atomic_t fscache_n_object_lookups_negative;
39675 -atomic_t fscache_n_object_lookups_positive;
39676 -atomic_t fscache_n_object_lookups_timed_out;
39677 -atomic_t fscache_n_object_created;
39678 -atomic_t fscache_n_object_avail;
39679 -atomic_t fscache_n_object_dead;
39680 -
39681 -atomic_t fscache_n_checkaux_none;
39682 -atomic_t fscache_n_checkaux_okay;
39683 -atomic_t fscache_n_checkaux_update;
39684 -atomic_t fscache_n_checkaux_obsolete;
39685 +atomic_unchecked_t fscache_n_op_pend;
39686 +atomic_unchecked_t fscache_n_op_run;
39687 +atomic_unchecked_t fscache_n_op_enqueue;
39688 +atomic_unchecked_t fscache_n_op_requeue;
39689 +atomic_unchecked_t fscache_n_op_deferred_release;
39690 +atomic_unchecked_t fscache_n_op_release;
39691 +atomic_unchecked_t fscache_n_op_gc;
39692 +atomic_unchecked_t fscache_n_op_cancelled;
39693 +atomic_unchecked_t fscache_n_op_rejected;
39694 +
39695 +atomic_unchecked_t fscache_n_attr_changed;
39696 +atomic_unchecked_t fscache_n_attr_changed_ok;
39697 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
39698 +atomic_unchecked_t fscache_n_attr_changed_nomem;
39699 +atomic_unchecked_t fscache_n_attr_changed_calls;
39700 +
39701 +atomic_unchecked_t fscache_n_allocs;
39702 +atomic_unchecked_t fscache_n_allocs_ok;
39703 +atomic_unchecked_t fscache_n_allocs_wait;
39704 +atomic_unchecked_t fscache_n_allocs_nobufs;
39705 +atomic_unchecked_t fscache_n_allocs_intr;
39706 +atomic_unchecked_t fscache_n_allocs_object_dead;
39707 +atomic_unchecked_t fscache_n_alloc_ops;
39708 +atomic_unchecked_t fscache_n_alloc_op_waits;
39709 +
39710 +atomic_unchecked_t fscache_n_retrievals;
39711 +atomic_unchecked_t fscache_n_retrievals_ok;
39712 +atomic_unchecked_t fscache_n_retrievals_wait;
39713 +atomic_unchecked_t fscache_n_retrievals_nodata;
39714 +atomic_unchecked_t fscache_n_retrievals_nobufs;
39715 +atomic_unchecked_t fscache_n_retrievals_intr;
39716 +atomic_unchecked_t fscache_n_retrievals_nomem;
39717 +atomic_unchecked_t fscache_n_retrievals_object_dead;
39718 +atomic_unchecked_t fscache_n_retrieval_ops;
39719 +atomic_unchecked_t fscache_n_retrieval_op_waits;
39720 +
39721 +atomic_unchecked_t fscache_n_stores;
39722 +atomic_unchecked_t fscache_n_stores_ok;
39723 +atomic_unchecked_t fscache_n_stores_again;
39724 +atomic_unchecked_t fscache_n_stores_nobufs;
39725 +atomic_unchecked_t fscache_n_stores_oom;
39726 +atomic_unchecked_t fscache_n_store_ops;
39727 +atomic_unchecked_t fscache_n_store_calls;
39728 +atomic_unchecked_t fscache_n_store_pages;
39729 +atomic_unchecked_t fscache_n_store_radix_deletes;
39730 +atomic_unchecked_t fscache_n_store_pages_over_limit;
39731 +
39732 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
39733 +atomic_unchecked_t fscache_n_store_vmscan_gone;
39734 +atomic_unchecked_t fscache_n_store_vmscan_busy;
39735 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
39736 +
39737 +atomic_unchecked_t fscache_n_marks;
39738 +atomic_unchecked_t fscache_n_uncaches;
39739 +
39740 +atomic_unchecked_t fscache_n_acquires;
39741 +atomic_unchecked_t fscache_n_acquires_null;
39742 +atomic_unchecked_t fscache_n_acquires_no_cache;
39743 +atomic_unchecked_t fscache_n_acquires_ok;
39744 +atomic_unchecked_t fscache_n_acquires_nobufs;
39745 +atomic_unchecked_t fscache_n_acquires_oom;
39746 +
39747 +atomic_unchecked_t fscache_n_updates;
39748 +atomic_unchecked_t fscache_n_updates_null;
39749 +atomic_unchecked_t fscache_n_updates_run;
39750 +
39751 +atomic_unchecked_t fscache_n_relinquishes;
39752 +atomic_unchecked_t fscache_n_relinquishes_null;
39753 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
39754 +atomic_unchecked_t fscache_n_relinquishes_retire;
39755 +
39756 +atomic_unchecked_t fscache_n_cookie_index;
39757 +atomic_unchecked_t fscache_n_cookie_data;
39758 +atomic_unchecked_t fscache_n_cookie_special;
39759 +
39760 +atomic_unchecked_t fscache_n_object_alloc;
39761 +atomic_unchecked_t fscache_n_object_no_alloc;
39762 +atomic_unchecked_t fscache_n_object_lookups;
39763 +atomic_unchecked_t fscache_n_object_lookups_negative;
39764 +atomic_unchecked_t fscache_n_object_lookups_positive;
39765 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
39766 +atomic_unchecked_t fscache_n_object_created;
39767 +atomic_unchecked_t fscache_n_object_avail;
39768 +atomic_unchecked_t fscache_n_object_dead;
39769 +
39770 +atomic_unchecked_t fscache_n_checkaux_none;
39771 +atomic_unchecked_t fscache_n_checkaux_okay;
39772 +atomic_unchecked_t fscache_n_checkaux_update;
39773 +atomic_unchecked_t fscache_n_checkaux_obsolete;
39774
39775 atomic_t fscache_n_cop_alloc_object;
39776 atomic_t fscache_n_cop_lookup_object;
39777 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
39778 seq_puts(m, "FS-Cache statistics\n");
39779
39780 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
39781 - atomic_read(&fscache_n_cookie_index),
39782 - atomic_read(&fscache_n_cookie_data),
39783 - atomic_read(&fscache_n_cookie_special));
39784 + atomic_read_unchecked(&fscache_n_cookie_index),
39785 + atomic_read_unchecked(&fscache_n_cookie_data),
39786 + atomic_read_unchecked(&fscache_n_cookie_special));
39787
39788 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
39789 - atomic_read(&fscache_n_object_alloc),
39790 - atomic_read(&fscache_n_object_no_alloc),
39791 - atomic_read(&fscache_n_object_avail),
39792 - atomic_read(&fscache_n_object_dead));
39793 + atomic_read_unchecked(&fscache_n_object_alloc),
39794 + atomic_read_unchecked(&fscache_n_object_no_alloc),
39795 + atomic_read_unchecked(&fscache_n_object_avail),
39796 + atomic_read_unchecked(&fscache_n_object_dead));
39797 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
39798 - atomic_read(&fscache_n_checkaux_none),
39799 - atomic_read(&fscache_n_checkaux_okay),
39800 - atomic_read(&fscache_n_checkaux_update),
39801 - atomic_read(&fscache_n_checkaux_obsolete));
39802 + atomic_read_unchecked(&fscache_n_checkaux_none),
39803 + atomic_read_unchecked(&fscache_n_checkaux_okay),
39804 + atomic_read_unchecked(&fscache_n_checkaux_update),
39805 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
39806
39807 seq_printf(m, "Pages : mrk=%u unc=%u\n",
39808 - atomic_read(&fscache_n_marks),
39809 - atomic_read(&fscache_n_uncaches));
39810 + atomic_read_unchecked(&fscache_n_marks),
39811 + atomic_read_unchecked(&fscache_n_uncaches));
39812
39813 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
39814 " oom=%u\n",
39815 - atomic_read(&fscache_n_acquires),
39816 - atomic_read(&fscache_n_acquires_null),
39817 - atomic_read(&fscache_n_acquires_no_cache),
39818 - atomic_read(&fscache_n_acquires_ok),
39819 - atomic_read(&fscache_n_acquires_nobufs),
39820 - atomic_read(&fscache_n_acquires_oom));
39821 + atomic_read_unchecked(&fscache_n_acquires),
39822 + atomic_read_unchecked(&fscache_n_acquires_null),
39823 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
39824 + atomic_read_unchecked(&fscache_n_acquires_ok),
39825 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
39826 + atomic_read_unchecked(&fscache_n_acquires_oom));
39827
39828 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
39829 - atomic_read(&fscache_n_object_lookups),
39830 - atomic_read(&fscache_n_object_lookups_negative),
39831 - atomic_read(&fscache_n_object_lookups_positive),
39832 - atomic_read(&fscache_n_object_lookups_timed_out),
39833 - atomic_read(&fscache_n_object_created));
39834 + atomic_read_unchecked(&fscache_n_object_lookups),
39835 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
39836 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
39837 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
39838 + atomic_read_unchecked(&fscache_n_object_created));
39839
39840 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
39841 - atomic_read(&fscache_n_updates),
39842 - atomic_read(&fscache_n_updates_null),
39843 - atomic_read(&fscache_n_updates_run));
39844 + atomic_read_unchecked(&fscache_n_updates),
39845 + atomic_read_unchecked(&fscache_n_updates_null),
39846 + atomic_read_unchecked(&fscache_n_updates_run));
39847
39848 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
39849 - atomic_read(&fscache_n_relinquishes),
39850 - atomic_read(&fscache_n_relinquishes_null),
39851 - atomic_read(&fscache_n_relinquishes_waitcrt),
39852 - atomic_read(&fscache_n_relinquishes_retire));
39853 + atomic_read_unchecked(&fscache_n_relinquishes),
39854 + atomic_read_unchecked(&fscache_n_relinquishes_null),
39855 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
39856 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
39857
39858 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
39859 - atomic_read(&fscache_n_attr_changed),
39860 - atomic_read(&fscache_n_attr_changed_ok),
39861 - atomic_read(&fscache_n_attr_changed_nobufs),
39862 - atomic_read(&fscache_n_attr_changed_nomem),
39863 - atomic_read(&fscache_n_attr_changed_calls));
39864 + atomic_read_unchecked(&fscache_n_attr_changed),
39865 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
39866 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
39867 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
39868 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
39869
39870 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
39871 - atomic_read(&fscache_n_allocs),
39872 - atomic_read(&fscache_n_allocs_ok),
39873 - atomic_read(&fscache_n_allocs_wait),
39874 - atomic_read(&fscache_n_allocs_nobufs),
39875 - atomic_read(&fscache_n_allocs_intr));
39876 + atomic_read_unchecked(&fscache_n_allocs),
39877 + atomic_read_unchecked(&fscache_n_allocs_ok),
39878 + atomic_read_unchecked(&fscache_n_allocs_wait),
39879 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
39880 + atomic_read_unchecked(&fscache_n_allocs_intr));
39881 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
39882 - atomic_read(&fscache_n_alloc_ops),
39883 - atomic_read(&fscache_n_alloc_op_waits),
39884 - atomic_read(&fscache_n_allocs_object_dead));
39885 + atomic_read_unchecked(&fscache_n_alloc_ops),
39886 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
39887 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
39888
39889 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
39890 " int=%u oom=%u\n",
39891 - atomic_read(&fscache_n_retrievals),
39892 - atomic_read(&fscache_n_retrievals_ok),
39893 - atomic_read(&fscache_n_retrievals_wait),
39894 - atomic_read(&fscache_n_retrievals_nodata),
39895 - atomic_read(&fscache_n_retrievals_nobufs),
39896 - atomic_read(&fscache_n_retrievals_intr),
39897 - atomic_read(&fscache_n_retrievals_nomem));
39898 + atomic_read_unchecked(&fscache_n_retrievals),
39899 + atomic_read_unchecked(&fscache_n_retrievals_ok),
39900 + atomic_read_unchecked(&fscache_n_retrievals_wait),
39901 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
39902 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
39903 + atomic_read_unchecked(&fscache_n_retrievals_intr),
39904 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
39905 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
39906 - atomic_read(&fscache_n_retrieval_ops),
39907 - atomic_read(&fscache_n_retrieval_op_waits),
39908 - atomic_read(&fscache_n_retrievals_object_dead));
39909 + atomic_read_unchecked(&fscache_n_retrieval_ops),
39910 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
39911 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
39912
39913 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
39914 - atomic_read(&fscache_n_stores),
39915 - atomic_read(&fscache_n_stores_ok),
39916 - atomic_read(&fscache_n_stores_again),
39917 - atomic_read(&fscache_n_stores_nobufs),
39918 - atomic_read(&fscache_n_stores_oom));
39919 + atomic_read_unchecked(&fscache_n_stores),
39920 + atomic_read_unchecked(&fscache_n_stores_ok),
39921 + atomic_read_unchecked(&fscache_n_stores_again),
39922 + atomic_read_unchecked(&fscache_n_stores_nobufs),
39923 + atomic_read_unchecked(&fscache_n_stores_oom));
39924 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
39925 - atomic_read(&fscache_n_store_ops),
39926 - atomic_read(&fscache_n_store_calls),
39927 - atomic_read(&fscache_n_store_pages),
39928 - atomic_read(&fscache_n_store_radix_deletes),
39929 - atomic_read(&fscache_n_store_pages_over_limit));
39930 + atomic_read_unchecked(&fscache_n_store_ops),
39931 + atomic_read_unchecked(&fscache_n_store_calls),
39932 + atomic_read_unchecked(&fscache_n_store_pages),
39933 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
39934 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
39935
39936 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
39937 - atomic_read(&fscache_n_store_vmscan_not_storing),
39938 - atomic_read(&fscache_n_store_vmscan_gone),
39939 - atomic_read(&fscache_n_store_vmscan_busy),
39940 - atomic_read(&fscache_n_store_vmscan_cancelled));
39941 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
39942 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
39943 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
39944 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
39945
39946 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
39947 - atomic_read(&fscache_n_op_pend),
39948 - atomic_read(&fscache_n_op_run),
39949 - atomic_read(&fscache_n_op_enqueue),
39950 - atomic_read(&fscache_n_op_cancelled),
39951 - atomic_read(&fscache_n_op_rejected));
39952 + atomic_read_unchecked(&fscache_n_op_pend),
39953 + atomic_read_unchecked(&fscache_n_op_run),
39954 + atomic_read_unchecked(&fscache_n_op_enqueue),
39955 + atomic_read_unchecked(&fscache_n_op_cancelled),
39956 + atomic_read_unchecked(&fscache_n_op_rejected));
39957 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
39958 - atomic_read(&fscache_n_op_deferred_release),
39959 - atomic_read(&fscache_n_op_release),
39960 - atomic_read(&fscache_n_op_gc));
39961 + atomic_read_unchecked(&fscache_n_op_deferred_release),
39962 + atomic_read_unchecked(&fscache_n_op_release),
39963 + atomic_read_unchecked(&fscache_n_op_gc));
39964
39965 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
39966 atomic_read(&fscache_n_cop_alloc_object),
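[editor's note] A minimal sketch (not part of the patch) of the counter idiom the stats.c hunks above convert to: counters that are purely informational are declared atomic_unchecked_t and accessed through the *_unchecked helpers this patch introduces, which opt them out of PaX's REFCOUNT overflow protection; wrap-around in a statistic is harmless, so the unchecked variants avoid needless overflow checks. All names below are illustrative.

        static atomic_unchecked_t n_example_events;     /* statistic only, never a refcount */

        static void note_example_event(void)
        {
                atomic_inc_unchecked(&n_example_events); /* no overflow trap wanted here */
        }

        static int example_stats_show(struct seq_file *m, void *v)
        {
                seq_printf(m, "events=%u\n",
                           atomic_read_unchecked(&n_example_events));
                return 0;
        }
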
39967 diff -urNp linux-2.6.32.42/fs/fs_struct.c linux-2.6.32.42/fs/fs_struct.c
39968 --- linux-2.6.32.42/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
39969 +++ linux-2.6.32.42/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
39970 @@ -4,6 +4,7 @@
39971 #include <linux/path.h>
39972 #include <linux/slab.h>
39973 #include <linux/fs_struct.h>
39974 +#include <linux/grsecurity.h>
39975
39976 /*
39977 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
39978 @@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
39979 old_root = fs->root;
39980 fs->root = *path;
39981 path_get(path);
39982 + gr_set_chroot_entries(current, path);
39983 write_unlock(&fs->lock);
39984 if (old_root.dentry)
39985 path_put(&old_root);
39986 @@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
39987 && fs->root.mnt == old_root->mnt) {
39988 path_get(new_root);
39989 fs->root = *new_root;
39990 + gr_set_chroot_entries(p, new_root);
39991 count++;
39992 }
39993 if (fs->pwd.dentry == old_root->dentry
39994 @@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
39995 task_lock(tsk);
39996 write_lock(&fs->lock);
39997 tsk->fs = NULL;
39998 - kill = !--fs->users;
39999 + gr_clear_chroot_entries(tsk);
40000 + kill = !atomic_dec_return(&fs->users);
40001 write_unlock(&fs->lock);
40002 task_unlock(tsk);
40003 if (kill)
40004 @@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
40005 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
40006 /* We don't need to lock fs - think why ;-) */
40007 if (fs) {
40008 - fs->users = 1;
40009 + atomic_set(&fs->users, 1);
40010 fs->in_exec = 0;
40011 rwlock_init(&fs->lock);
40012 fs->umask = old->umask;
40013 @@ -127,8 +131,9 @@ int unshare_fs_struct(void)
40014
40015 task_lock(current);
40016 write_lock(&fs->lock);
40017 - kill = !--fs->users;
40018 + kill = !atomic_dec_return(&fs->users);
40019 current->fs = new_fs;
40020 + gr_set_chroot_entries(current, &new_fs->root);
40021 write_unlock(&fs->lock);
40022 task_unlock(current);
40023
40024 @@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
40025
40026 /* to be mentioned only in INIT_TASK */
40027 struct fs_struct init_fs = {
40028 - .users = 1,
40029 + .users = ATOMIC_INIT(1),
40030 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
40031 .umask = 0022,
40032 };
40033 @@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
40034 task_lock(current);
40035
40036 write_lock(&init_fs.lock);
40037 - init_fs.users++;
40038 + atomic_inc(&init_fs.users);
40039 write_unlock(&init_fs.lock);
40040
40041 write_lock(&fs->lock);
40042 current->fs = &init_fs;
40043 - kill = !--fs->users;
40044 + gr_set_chroot_entries(current, &current->fs->root);
40045 + kill = !atomic_dec_return(&fs->users);
40046 write_unlock(&fs->lock);
40047
40048 task_unlock(current);
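[editor's note] A hedged sketch of the reference-counting idiom the fs_struct.c hunks above switch to: fs->users becomes an atomic_t, so the "last user frees it" decision is made with atomic_dec_return() instead of a plain pre-decrement. The wrapper and the free_fs_struct() name below are hypothetical; the real change is open-coded in exit_fs(), unshare_fs_struct() and daemonize_fs_struct().

        static void put_fs_users_sketch(struct fs_struct *fs)
        {
                int kill;

                write_lock(&fs->lock);
                kill = !atomic_dec_return(&fs->users);   /* was: kill = !--fs->users; */
                write_unlock(&fs->lock);
                if (kill)
                        free_fs_struct(fs);              /* hypothetical teardown helper */
        }
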
40049 diff -urNp linux-2.6.32.42/fs/fuse/cuse.c linux-2.6.32.42/fs/fuse/cuse.c
40050 --- linux-2.6.32.42/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
40051 +++ linux-2.6.32.42/fs/fuse/cuse.c 2011-04-17 15:56:46.000000000 -0400
40052 @@ -528,8 +528,18 @@ static int cuse_channel_release(struct i
40053 return rc;
40054 }
40055
40056 -static struct file_operations cuse_channel_fops; /* initialized during init */
40057 -
40058 +static const struct file_operations cuse_channel_fops = { /* initialized during init */
40059 + .owner = THIS_MODULE,
40060 + .llseek = no_llseek,
40061 + .read = do_sync_read,
40062 + .aio_read = fuse_dev_read,
40063 + .write = do_sync_write,
40064 + .aio_write = fuse_dev_write,
40065 + .poll = fuse_dev_poll,
40066 + .open = cuse_channel_open,
40067 + .release = cuse_channel_release,
40068 + .fasync = fuse_dev_fasync,
40069 +};
40070
40071 /**************************************************************************
40072 * Misc stuff and module initializatiion
40073 @@ -575,12 +585,6 @@ static int __init cuse_init(void)
40074 for (i = 0; i < CUSE_CONNTBL_LEN; i++)
40075 INIT_LIST_HEAD(&cuse_conntbl[i]);
40076
40077 - /* inherit and extend fuse_dev_operations */
40078 - cuse_channel_fops = fuse_dev_operations;
40079 - cuse_channel_fops.owner = THIS_MODULE;
40080 - cuse_channel_fops.open = cuse_channel_open;
40081 - cuse_channel_fops.release = cuse_channel_release;
40082 -
40083 cuse_class = class_create(THIS_MODULE, "cuse");
40084 if (IS_ERR(cuse_class))
40085 return PTR_ERR(cuse_class);
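[editor's note] The cuse.c hunks above replace a file_operations table that was copied and patched at init time with a fully initialized const definition, letting the ops table live in read-only data; the fuse_dev_* handlers it needs are un-static'd and exported in the fs/fuse/dev.c hunks further down. A generic sketch of the same constification pattern, with illustrative handler names:

        /* before: a writable table filled in at runtime
         *     static struct file_operations example_fops;
         *     example_fops = base_fops;
         *     example_fops.open = example_open;
         * after: initialized once at compile time, never written again */
        static const struct file_operations example_fops = {
                .owner   = THIS_MODULE,
                .open    = example_open,        /* illustrative handlers */
                .release = example_release,
        };
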
40086 diff -urNp linux-2.6.32.42/fs/fuse/dev.c linux-2.6.32.42/fs/fuse/dev.c
40087 --- linux-2.6.32.42/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
40088 +++ linux-2.6.32.42/fs/fuse/dev.c 2011-04-17 15:56:46.000000000 -0400
40089 @@ -745,7 +745,7 @@ __releases(&fc->lock)
40090 * request_end(). Otherwise add it to the processing list, and set
40091 * the 'sent' flag.
40092 */
40093 -static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40094 +ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40095 unsigned long nr_segs, loff_t pos)
40096 {
40097 int err;
40098 @@ -827,6 +827,7 @@ static ssize_t fuse_dev_read(struct kioc
40099 spin_unlock(&fc->lock);
40100 return err;
40101 }
40102 +EXPORT_SYMBOL_GPL(fuse_dev_read);
40103
40104 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
40105 struct fuse_copy_state *cs)
40106 @@ -885,7 +886,7 @@ static int fuse_notify_inval_entry(struc
40107 {
40108 struct fuse_notify_inval_entry_out outarg;
40109 int err = -EINVAL;
40110 - char buf[FUSE_NAME_MAX+1];
40111 + char *buf = NULL;
40112 struct qstr name;
40113
40114 if (size < sizeof(outarg))
40115 @@ -899,6 +900,11 @@ static int fuse_notify_inval_entry(struc
40116 if (outarg.namelen > FUSE_NAME_MAX)
40117 goto err;
40118
40119 + err = -ENOMEM;
40120 + buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
40121 + if (!buf)
40122 + goto err;
40123 +
40124 name.name = buf;
40125 name.len = outarg.namelen;
40126 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
40127 @@ -910,17 +916,15 @@ static int fuse_notify_inval_entry(struc
40128
40129 down_read(&fc->killsb);
40130 err = -ENOENT;
40131 - if (!fc->sb)
40132 - goto err_unlock;
40133 -
40134 - err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
40135 -
40136 -err_unlock:
40137 + if (fc->sb)
40138 + err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
40139 up_read(&fc->killsb);
40140 + kfree(buf);
40141 return err;
40142
40143 err:
40144 fuse_copy_finish(cs);
40145 + kfree(buf);
40146 return err;
40147 }
40148
40149 @@ -987,7 +991,7 @@ static int copy_out_args(struct fuse_cop
40150 * it from the list and copy the rest of the buffer to the request.
40151 * The request is finished by calling request_end()
40152 */
40153 -static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40154 +ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40155 unsigned long nr_segs, loff_t pos)
40156 {
40157 int err;
40158 @@ -1083,8 +1087,9 @@ static ssize_t fuse_dev_write(struct kio
40159 fuse_copy_finish(&cs);
40160 return err;
40161 }
40162 +EXPORT_SYMBOL_GPL(fuse_dev_write);
40163
40164 -static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40165 +unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40166 {
40167 unsigned mask = POLLOUT | POLLWRNORM;
40168 struct fuse_conn *fc = fuse_get_conn(file);
40169 @@ -1102,6 +1107,7 @@ static unsigned fuse_dev_poll(struct fil
40170
40171 return mask;
40172 }
40173 +EXPORT_SYMBOL_GPL(fuse_dev_poll);
40174
40175 /*
40176 * Abort all requests on the given list (pending or processing)
40177 @@ -1218,7 +1224,7 @@ int fuse_dev_release(struct inode *inode
40178 }
40179 EXPORT_SYMBOL_GPL(fuse_dev_release);
40180
40181 -static int fuse_dev_fasync(int fd, struct file *file, int on)
40182 +int fuse_dev_fasync(int fd, struct file *file, int on)
40183 {
40184 struct fuse_conn *fc = fuse_get_conn(file);
40185 if (!fc)
40186 @@ -1227,6 +1233,7 @@ static int fuse_dev_fasync(int fd, struc
40187 /* No locking - fasync_helper does its own locking */
40188 return fasync_helper(fd, file, on, &fc->fasync);
40189 }
40190 +EXPORT_SYMBOL_GPL(fuse_dev_fasync);
40191
40192 const struct file_operations fuse_dev_operations = {
40193 .owner = THIS_MODULE,
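[editor's note] One dev.c hunk above also moves fuse_notify_inval_entry() off a roughly 1 KiB on-stack name buffer (char buf[FUSE_NAME_MAX+1]) onto a kmalloc'd buffer that is freed on every exit path, trimming kernel stack usage. A simplified sketch of that conversion (illustrative wrapper, error handling reduced to the essentials):

        static int copy_name_sketch(struct fuse_copy_state *cs, unsigned namelen)
        {
                char *buf;
                int err;

                buf = kmalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);   /* heap, not stack */
                if (!buf)
                        return -ENOMEM;
                err = fuse_copy_one(cs, buf, namelen + 1);
                /* ... use buf as the qstr name ... */
                kfree(buf);                                     /* freed on every path */
                return err;
        }
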
40194 diff -urNp linux-2.6.32.42/fs/fuse/dir.c linux-2.6.32.42/fs/fuse/dir.c
40195 --- linux-2.6.32.42/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
40196 +++ linux-2.6.32.42/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
40197 @@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
40198 return link;
40199 }
40200
40201 -static void free_link(char *link)
40202 +static void free_link(const char *link)
40203 {
40204 if (!IS_ERR(link))
40205 free_page((unsigned long) link);
40206 diff -urNp linux-2.6.32.42/fs/fuse/fuse_i.h linux-2.6.32.42/fs/fuse/fuse_i.h
40207 --- linux-2.6.32.42/fs/fuse/fuse_i.h 2011-03-27 14:31:47.000000000 -0400
40208 +++ linux-2.6.32.42/fs/fuse/fuse_i.h 2011-04-17 15:56:46.000000000 -0400
40209 @@ -525,6 +525,16 @@ extern const struct file_operations fuse
40210
40211 extern const struct dentry_operations fuse_dentry_operations;
40212
40213 +extern ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40214 + unsigned long nr_segs, loff_t pos);
40215 +
40216 +extern ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40217 + unsigned long nr_segs, loff_t pos);
40218 +
40219 +extern unsigned fuse_dev_poll(struct file *file, poll_table *wait);
40220 +
40221 +extern int fuse_dev_fasync(int fd, struct file *file, int on);
40222 +
40223 /**
40224 * Inode to nodeid comparison.
40225 */
40226 diff -urNp linux-2.6.32.42/fs/gfs2/ops_inode.c linux-2.6.32.42/fs/gfs2/ops_inode.c
40227 --- linux-2.6.32.42/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
40228 +++ linux-2.6.32.42/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
40229 @@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
40230 unsigned int x;
40231 int error;
40232
40233 + pax_track_stack();
40234 +
40235 if (ndentry->d_inode) {
40236 nip = GFS2_I(ndentry->d_inode);
40237 if (ip == nip)
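[editor's note] The pax_track_stack() calls added here and in the following hunks go into functions with unusually large stack frames; as far as can be told from this patch, they feed PaX's stack-usage tracking (the STACKLEAK-style feature) so the kernel can later sanitize however deep the stack actually grew. Usage is just a call at the top of the function body, e.g. (illustrative function):

        static int big_frame_sketch(void)
        {
                char scratch[1024];             /* large on-stack footprint */

                pax_track_stack();              /* provided by the PaX core in this patch */

                memset(scratch, 0, sizeof(scratch));
                return scratch[0];
        }
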
40238 diff -urNp linux-2.6.32.42/fs/gfs2/sys.c linux-2.6.32.42/fs/gfs2/sys.c
40239 --- linux-2.6.32.42/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
40240 +++ linux-2.6.32.42/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
40241 @@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
40242 return a->store ? a->store(sdp, buf, len) : len;
40243 }
40244
40245 -static struct sysfs_ops gfs2_attr_ops = {
40246 +static const struct sysfs_ops gfs2_attr_ops = {
40247 .show = gfs2_attr_show,
40248 .store = gfs2_attr_store,
40249 };
40250 @@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
40251 return 0;
40252 }
40253
40254 -static struct kset_uevent_ops gfs2_uevent_ops = {
40255 +static const struct kset_uevent_ops gfs2_uevent_ops = {
40256 .uevent = gfs2_uevent,
40257 };
40258
40259 diff -urNp linux-2.6.32.42/fs/hfsplus/catalog.c linux-2.6.32.42/fs/hfsplus/catalog.c
40260 --- linux-2.6.32.42/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
40261 +++ linux-2.6.32.42/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
40262 @@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
40263 int err;
40264 u16 type;
40265
40266 + pax_track_stack();
40267 +
40268 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
40269 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
40270 if (err)
40271 @@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
40272 int entry_size;
40273 int err;
40274
40275 + pax_track_stack();
40276 +
40277 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
40278 sb = dir->i_sb;
40279 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
40280 @@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
40281 int entry_size, type;
40282 int err = 0;
40283
40284 + pax_track_stack();
40285 +
40286 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
40287 dst_dir->i_ino, dst_name->name);
40288 sb = src_dir->i_sb;
40289 diff -urNp linux-2.6.32.42/fs/hfsplus/dir.c linux-2.6.32.42/fs/hfsplus/dir.c
40290 --- linux-2.6.32.42/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
40291 +++ linux-2.6.32.42/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
40292 @@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
40293 struct hfsplus_readdir_data *rd;
40294 u16 type;
40295
40296 + pax_track_stack();
40297 +
40298 if (filp->f_pos >= inode->i_size)
40299 return 0;
40300
40301 diff -urNp linux-2.6.32.42/fs/hfsplus/inode.c linux-2.6.32.42/fs/hfsplus/inode.c
40302 --- linux-2.6.32.42/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
40303 +++ linux-2.6.32.42/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
40304 @@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
40305 int res = 0;
40306 u16 type;
40307
40308 + pax_track_stack();
40309 +
40310 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
40311
40312 HFSPLUS_I(inode).dev = 0;
40313 @@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
40314 struct hfs_find_data fd;
40315 hfsplus_cat_entry entry;
40316
40317 + pax_track_stack();
40318 +
40319 if (HFSPLUS_IS_RSRC(inode))
40320 main_inode = HFSPLUS_I(inode).rsrc_inode;
40321
40322 diff -urNp linux-2.6.32.42/fs/hfsplus/ioctl.c linux-2.6.32.42/fs/hfsplus/ioctl.c
40323 --- linux-2.6.32.42/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40324 +++ linux-2.6.32.42/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
40325 @@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
40326 struct hfsplus_cat_file *file;
40327 int res;
40328
40329 + pax_track_stack();
40330 +
40331 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40332 return -EOPNOTSUPP;
40333
40334 @@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
40335 struct hfsplus_cat_file *file;
40336 ssize_t res = 0;
40337
40338 + pax_track_stack();
40339 +
40340 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
40341 return -EOPNOTSUPP;
40342
40343 diff -urNp linux-2.6.32.42/fs/hfsplus/super.c linux-2.6.32.42/fs/hfsplus/super.c
40344 --- linux-2.6.32.42/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
40345 +++ linux-2.6.32.42/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
40346 @@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
40347 struct nls_table *nls = NULL;
40348 int err = -EINVAL;
40349
40350 + pax_track_stack();
40351 +
40352 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
40353 if (!sbi)
40354 return -ENOMEM;
40355 diff -urNp linux-2.6.32.42/fs/hugetlbfs/inode.c linux-2.6.32.42/fs/hugetlbfs/inode.c
40356 --- linux-2.6.32.42/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40357 +++ linux-2.6.32.42/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
40358 @@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
40359 .kill_sb = kill_litter_super,
40360 };
40361
40362 -static struct vfsmount *hugetlbfs_vfsmount;
40363 +struct vfsmount *hugetlbfs_vfsmount;
40364
40365 static int can_do_hugetlb_shm(void)
40366 {
40367 diff -urNp linux-2.6.32.42/fs/ioctl.c linux-2.6.32.42/fs/ioctl.c
40368 --- linux-2.6.32.42/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
40369 +++ linux-2.6.32.42/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
40370 @@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
40371 u64 phys, u64 len, u32 flags)
40372 {
40373 struct fiemap_extent extent;
40374 - struct fiemap_extent *dest = fieinfo->fi_extents_start;
40375 + struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
40376
40377 /* only count the extents */
40378 if (fieinfo->fi_extents_max == 0) {
40379 @@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
40380
40381 fieinfo.fi_flags = fiemap.fm_flags;
40382 fieinfo.fi_extents_max = fiemap.fm_extent_count;
40383 - fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
40384 + fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
40385
40386 if (fiemap.fm_extent_count != 0 &&
40387 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
40388 @@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
40389 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
40390 fiemap.fm_flags = fieinfo.fi_flags;
40391 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
40392 - if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
40393 + if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
40394 error = -EFAULT;
40395
40396 return error;
40397 diff -urNp linux-2.6.32.42/fs/jbd/checkpoint.c linux-2.6.32.42/fs/jbd/checkpoint.c
40398 --- linux-2.6.32.42/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
40399 +++ linux-2.6.32.42/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
40400 @@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
40401 tid_t this_tid;
40402 int result;
40403
40404 + pax_track_stack();
40405 +
40406 jbd_debug(1, "Start checkpoint\n");
40407
40408 /*
40409 diff -urNp linux-2.6.32.42/fs/jffs2/compr_rtime.c linux-2.6.32.42/fs/jffs2/compr_rtime.c
40410 --- linux-2.6.32.42/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
40411 +++ linux-2.6.32.42/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
40412 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
40413 int outpos = 0;
40414 int pos=0;
40415
40416 + pax_track_stack();
40417 +
40418 memset(positions,0,sizeof(positions));
40419
40420 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
40421 @@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
40422 int outpos = 0;
40423 int pos=0;
40424
40425 + pax_track_stack();
40426 +
40427 memset(positions,0,sizeof(positions));
40428
40429 while (outpos<destlen) {
40430 diff -urNp linux-2.6.32.42/fs/jffs2/compr_rubin.c linux-2.6.32.42/fs/jffs2/compr_rubin.c
40431 --- linux-2.6.32.42/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
40432 +++ linux-2.6.32.42/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
40433 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
40434 int ret;
40435 uint32_t mysrclen, mydstlen;
40436
40437 + pax_track_stack();
40438 +
40439 mysrclen = *sourcelen;
40440 mydstlen = *dstlen - 8;
40441
40442 diff -urNp linux-2.6.32.42/fs/jffs2/erase.c linux-2.6.32.42/fs/jffs2/erase.c
40443 --- linux-2.6.32.42/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
40444 +++ linux-2.6.32.42/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
40445 @@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
40446 struct jffs2_unknown_node marker = {
40447 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
40448 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40449 - .totlen = cpu_to_je32(c->cleanmarker_size)
40450 + .totlen = cpu_to_je32(c->cleanmarker_size),
40451 + .hdr_crc = cpu_to_je32(0)
40452 };
40453
40454 jffs2_prealloc_raw_node_refs(c, jeb, 1);
40455 diff -urNp linux-2.6.32.42/fs/jffs2/wbuf.c linux-2.6.32.42/fs/jffs2/wbuf.c
40456 --- linux-2.6.32.42/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
40457 +++ linux-2.6.32.42/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
40458 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
40459 {
40460 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
40461 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
40462 - .totlen = constant_cpu_to_je32(8)
40463 + .totlen = constant_cpu_to_je32(8),
40464 + .hdr_crc = constant_cpu_to_je32(0)
40465 };
40466
40467 /*
40468 diff -urNp linux-2.6.32.42/fs/jffs2/xattr.c linux-2.6.32.42/fs/jffs2/xattr.c
40469 --- linux-2.6.32.42/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
40470 +++ linux-2.6.32.42/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
40471 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
40472
40473 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
40474
40475 + pax_track_stack();
40476 +
40477 /* Phase.1 : Merge same xref */
40478 for (i=0; i < XREF_TMPHASH_SIZE; i++)
40479 xref_tmphash[i] = NULL;
40480 diff -urNp linux-2.6.32.42/fs/jfs/super.c linux-2.6.32.42/fs/jfs/super.c
40481 --- linux-2.6.32.42/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
40482 +++ linux-2.6.32.42/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
40483 @@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
40484
40485 jfs_inode_cachep =
40486 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
40487 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
40488 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
40489 init_once);
40490 if (jfs_inode_cachep == NULL)
40491 return -ENOMEM;
40492 diff -urNp linux-2.6.32.42/fs/Kconfig.binfmt linux-2.6.32.42/fs/Kconfig.binfmt
40493 --- linux-2.6.32.42/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
40494 +++ linux-2.6.32.42/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
40495 @@ -86,7 +86,7 @@ config HAVE_AOUT
40496
40497 config BINFMT_AOUT
40498 tristate "Kernel support for a.out and ECOFF binaries"
40499 - depends on HAVE_AOUT
40500 + depends on HAVE_AOUT && BROKEN
40501 ---help---
40502 A.out (Assembler.OUTput) is a set of formats for libraries and
40503 executables used in the earliest versions of UNIX. Linux used
40504 diff -urNp linux-2.6.32.42/fs/libfs.c linux-2.6.32.42/fs/libfs.c
40505 --- linux-2.6.32.42/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
40506 +++ linux-2.6.32.42/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
40507 @@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
40508
40509 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
40510 struct dentry *next;
40511 + char d_name[sizeof(next->d_iname)];
40512 + const unsigned char *name;
40513 +
40514 next = list_entry(p, struct dentry, d_u.d_child);
40515 if (d_unhashed(next) || !next->d_inode)
40516 continue;
40517
40518 spin_unlock(&dcache_lock);
40519 - if (filldir(dirent, next->d_name.name,
40520 + name = next->d_name.name;
40521 + if (name == next->d_iname) {
40522 + memcpy(d_name, name, next->d_name.len);
40523 + name = d_name;
40524 + }
40525 + if (filldir(dirent, name,
40526 next->d_name.len, filp->f_pos,
40527 next->d_inode->i_ino,
40528 dt_type(next->d_inode)) < 0)
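[editor's note] The libfs.c hunk above snapshots names stored inline in the dentry (d_iname) into a local buffer before they are handed to filldir(), since once dcache_lock is dropped a concurrent rename could rewrite that inline storage mid-callback. A sketch of the idiom as a hypothetical helper (the patch open-codes it in dcache_readdir):

        /* caller supplies a buffer of sizeof(next->d_iname) bytes */
        static const unsigned char *stable_name_sketch(struct dentry *next, char *d_name)
        {
                const unsigned char *name = next->d_name.name;

                if (name == next->d_iname) {    /* short name lives inside the dentry */
                        memcpy(d_name, name, next->d_name.len);
                        name = d_name;          /* hand filldir() this stable copy */
                }
                return name;
        }
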
40529 diff -urNp linux-2.6.32.42/fs/lockd/clntproc.c linux-2.6.32.42/fs/lockd/clntproc.c
40530 --- linux-2.6.32.42/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
40531 +++ linux-2.6.32.42/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
40532 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
40533 /*
40534 * Cookie counter for NLM requests
40535 */
40536 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
40537 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
40538
40539 void nlmclnt_next_cookie(struct nlm_cookie *c)
40540 {
40541 - u32 cookie = atomic_inc_return(&nlm_cookie);
40542 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
40543
40544 memcpy(c->data, &cookie, 4);
40545 c->len=4;
40546 @@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
40547 struct nlm_rqst reqst, *req;
40548 int status;
40549
40550 + pax_track_stack();
40551 +
40552 req = &reqst;
40553 memset(req, 0, sizeof(*req));
40554 locks_init_lock(&req->a_args.lock.fl);
40555 diff -urNp linux-2.6.32.42/fs/lockd/svc.c linux-2.6.32.42/fs/lockd/svc.c
40556 --- linux-2.6.32.42/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
40557 +++ linux-2.6.32.42/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
40558 @@ -43,7 +43,7 @@
40559
40560 static struct svc_program nlmsvc_program;
40561
40562 -struct nlmsvc_binding * nlmsvc_ops;
40563 +const struct nlmsvc_binding * nlmsvc_ops;
40564 EXPORT_SYMBOL_GPL(nlmsvc_ops);
40565
40566 static DEFINE_MUTEX(nlmsvc_mutex);
40567 diff -urNp linux-2.6.32.42/fs/locks.c linux-2.6.32.42/fs/locks.c
40568 --- linux-2.6.32.42/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
40569 +++ linux-2.6.32.42/fs/locks.c 2011-04-17 15:56:46.000000000 -0400
40570 @@ -2007,16 +2007,16 @@ void locks_remove_flock(struct file *fil
40571 return;
40572
40573 if (filp->f_op && filp->f_op->flock) {
40574 - struct file_lock fl = {
40575 + struct file_lock flock = {
40576 .fl_pid = current->tgid,
40577 .fl_file = filp,
40578 .fl_flags = FL_FLOCK,
40579 .fl_type = F_UNLCK,
40580 .fl_end = OFFSET_MAX,
40581 };
40582 - filp->f_op->flock(filp, F_SETLKW, &fl);
40583 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
40584 - fl.fl_ops->fl_release_private(&fl);
40585 + filp->f_op->flock(filp, F_SETLKW, &flock);
40586 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
40587 + flock.fl_ops->fl_release_private(&flock);
40588 }
40589
40590 lock_kernel();
40591 diff -urNp linux-2.6.32.42/fs/namei.c linux-2.6.32.42/fs/namei.c
40592 --- linux-2.6.32.42/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
40593 +++ linux-2.6.32.42/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
40594 @@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
40595 return ret;
40596
40597 /*
40598 - * Read/write DACs are always overridable.
40599 - * Executable DACs are overridable if at least one exec bit is set.
40600 - */
40601 - if (!(mask & MAY_EXEC) || execute_ok(inode))
40602 - if (capable(CAP_DAC_OVERRIDE))
40603 - return 0;
40604 -
40605 - /*
40606 * Searching includes executable on directories, else just read.
40607 */
40608 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
40609 @@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
40610 if (capable(CAP_DAC_READ_SEARCH))
40611 return 0;
40612
40613 + /*
40614 + * Read/write DACs are always overridable.
40615 + * Executable DACs are overridable if at least one exec bit is set.
40616 + */
40617 + if (!(mask & MAY_EXEC) || execute_ok(inode))
40618 + if (capable(CAP_DAC_OVERRIDE))
40619 + return 0;
40620 +
40621 return -EACCES;
40622 }
40623
40624 @@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
40625 if (!ret)
40626 goto ok;
40627
40628 - if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
40629 + if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
40630 + capable(CAP_DAC_OVERRIDE))
40631 goto ok;
40632
40633 return ret;
40634 @@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
40635 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
40636 error = PTR_ERR(cookie);
40637 if (!IS_ERR(cookie)) {
40638 - char *s = nd_get_link(nd);
40639 + const char *s = nd_get_link(nd);
40640 error = 0;
40641 if (s)
40642 error = __vfs_follow_link(nd, s);
40643 @@ -669,6 +670,13 @@ static inline int do_follow_link(struct
40644 err = security_inode_follow_link(path->dentry, nd);
40645 if (err)
40646 goto loop;
40647 +
40648 + if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
40649 + path->dentry->d_inode, path->dentry, nd->path.mnt)) {
40650 + err = -EACCES;
40651 + goto loop;
40652 + }
40653 +
40654 current->link_count++;
40655 current->total_link_count++;
40656 nd->depth++;
40657 @@ -1016,11 +1024,18 @@ return_reval:
40658 break;
40659 }
40660 return_base:
40661 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
40662 + path_put(&nd->path);
40663 + return -ENOENT;
40664 + }
40665 return 0;
40666 out_dput:
40667 path_put_conditional(&next, nd);
40668 break;
40669 }
40670 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
40671 + err = -ENOENT;
40672 +
40673 path_put(&nd->path);
40674 return_err:
40675 return err;
40676 @@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
40677 int retval = path_init(dfd, name, flags, nd);
40678 if (!retval)
40679 retval = path_walk(name, nd);
40680 - if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
40681 - nd->path.dentry->d_inode))
40682 - audit_inode(name, nd->path.dentry);
40683 +
40684 + if (likely(!retval)) {
40685 + if (nd->path.dentry && nd->path.dentry->d_inode) {
40686 + if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
40687 + retval = -ENOENT;
40688 + if (!audit_dummy_context())
40689 + audit_inode(name, nd->path.dentry);
40690 + }
40691 + }
40692 if (nd->root.mnt) {
40693 path_put(&nd->root);
40694 nd->root.mnt = NULL;
40695 }
40696 +
40697 return retval;
40698 }
40699
40700 @@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
40701 if (error)
40702 goto err_out;
40703
40704 +
40705 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
40706 + error = -EPERM;
40707 + goto err_out;
40708 + }
40709 + if (gr_handle_rawio(inode)) {
40710 + error = -EPERM;
40711 + goto err_out;
40712 + }
40713 + if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
40714 + error = -EACCES;
40715 + goto err_out;
40716 + }
40717 +
40718 if (flag & O_TRUNC) {
40719 error = get_write_access(inode);
40720 if (error)
40721 @@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
40722 int error;
40723 struct dentry *dir = nd->path.dentry;
40724
40725 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
40726 + error = -EACCES;
40727 + goto out_unlock;
40728 + }
40729 +
40730 if (!IS_POSIXACL(dir->d_inode))
40731 mode &= ~current_umask();
40732 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
40733 if (error)
40734 goto out_unlock;
40735 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
40736 + if (!error)
40737 + gr_handle_create(path->dentry, nd->path.mnt);
40738 out_unlock:
40739 mutex_unlock(&dir->d_inode->i_mutex);
40740 dput(nd->path.dentry);
40741 @@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
40742 &nd, flag);
40743 if (error)
40744 return ERR_PTR(error);
40745 +
40746 + if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
40747 + error = -EPERM;
40748 + goto exit;
40749 + }
40750 +
40751 + if (gr_handle_rawio(nd.path.dentry->d_inode)) {
40752 + error = -EPERM;
40753 + goto exit;
40754 + }
40755 +
40756 + if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
40757 + error = -EACCES;
40758 + goto exit;
40759 + }
40760 +
40761 goto ok;
40762 }
40763
40764 @@ -1795,6 +1854,14 @@ do_last:
40765 /*
40766 * It already exists.
40767 */
40768 +
40769 + /* only check if O_CREAT is specified, all other checks need
40770 + to go into may_open */
40771 + if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
40772 + error = -EACCES;
40773 + goto exit_mutex_unlock;
40774 + }
40775 +
40776 mutex_unlock(&dir->d_inode->i_mutex);
40777 audit_inode(pathname, path.dentry);
40778
40779 @@ -1887,6 +1954,13 @@ do_link:
40780 error = security_inode_follow_link(path.dentry, &nd);
40781 if (error)
40782 goto exit_dput;
40783 +
40784 + if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
40785 + path.dentry, nd.path.mnt)) {
40786 + error = -EACCES;
40787 + goto exit_dput;
40788 + }
40789 +
40790 error = __do_follow_link(&path, &nd);
40791 if (error) {
40792 /* Does someone understand code flow here? Or it is only
40793 @@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
40794 error = may_mknod(mode);
40795 if (error)
40796 goto out_dput;
40797 +
40798 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
40799 + error = -EPERM;
40800 + goto out_dput;
40801 + }
40802 +
40803 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
40804 + error = -EACCES;
40805 + goto out_dput;
40806 + }
40807 +
40808 error = mnt_want_write(nd.path.mnt);
40809 if (error)
40810 goto out_dput;
40811 @@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
40812 }
40813 out_drop_write:
40814 mnt_drop_write(nd.path.mnt);
40815 +
40816 + if (!error)
40817 + gr_handle_create(dentry, nd.path.mnt);
40818 out_dput:
40819 dput(dentry);
40820 out_unlock:
40821 @@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
40822 if (IS_ERR(dentry))
40823 goto out_unlock;
40824
40825 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
40826 + error = -EACCES;
40827 + goto out_dput;
40828 + }
40829 +
40830 if (!IS_POSIXACL(nd.path.dentry->d_inode))
40831 mode &= ~current_umask();
40832 error = mnt_want_write(nd.path.mnt);
40833 @@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
40834 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
40835 out_drop_write:
40836 mnt_drop_write(nd.path.mnt);
40837 +
40838 + if (!error)
40839 + gr_handle_create(dentry, nd.path.mnt);
40840 +
40841 out_dput:
40842 dput(dentry);
40843 out_unlock:
40844 @@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
40845 char * name;
40846 struct dentry *dentry;
40847 struct nameidata nd;
40848 + ino_t saved_ino = 0;
40849 + dev_t saved_dev = 0;
40850
40851 error = user_path_parent(dfd, pathname, &nd, &name);
40852 if (error)
40853 @@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
40854 error = PTR_ERR(dentry);
40855 if (IS_ERR(dentry))
40856 goto exit2;
40857 +
40858 + if (dentry->d_inode != NULL) {
40859 + if (dentry->d_inode->i_nlink <= 1) {
40860 + saved_ino = dentry->d_inode->i_ino;
40861 + saved_dev = gr_get_dev_from_dentry(dentry);
40862 + }
40863 +
40864 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
40865 + error = -EACCES;
40866 + goto exit3;
40867 + }
40868 + }
40869 +
40870 error = mnt_want_write(nd.path.mnt);
40871 if (error)
40872 goto exit3;
40873 @@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
40874 if (error)
40875 goto exit4;
40876 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
40877 + if (!error && (saved_dev || saved_ino))
40878 + gr_handle_delete(saved_ino, saved_dev);
40879 exit4:
40880 mnt_drop_write(nd.path.mnt);
40881 exit3:
40882 @@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
40883 struct dentry *dentry;
40884 struct nameidata nd;
40885 struct inode *inode = NULL;
40886 + ino_t saved_ino = 0;
40887 + dev_t saved_dev = 0;
40888
40889 error = user_path_parent(dfd, pathname, &nd, &name);
40890 if (error)
40891 @@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
40892 if (nd.last.name[nd.last.len])
40893 goto slashes;
40894 inode = dentry->d_inode;
40895 - if (inode)
40896 + if (inode) {
40897 + if (inode->i_nlink <= 1) {
40898 + saved_ino = inode->i_ino;
40899 + saved_dev = gr_get_dev_from_dentry(dentry);
40900 + }
40901 +
40902 atomic_inc(&inode->i_count);
40903 +
40904 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
40905 + error = -EACCES;
40906 + goto exit2;
40907 + }
40908 + }
40909 error = mnt_want_write(nd.path.mnt);
40910 if (error)
40911 goto exit2;
40912 @@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
40913 if (error)
40914 goto exit3;
40915 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
40916 + if (!error && (saved_ino || saved_dev))
40917 + gr_handle_delete(saved_ino, saved_dev);
40918 exit3:
40919 mnt_drop_write(nd.path.mnt);
40920 exit2:
40921 @@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
40922 if (IS_ERR(dentry))
40923 goto out_unlock;
40924
40925 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
40926 + error = -EACCES;
40927 + goto out_dput;
40928 + }
40929 +
40930 error = mnt_want_write(nd.path.mnt);
40931 if (error)
40932 goto out_dput;
40933 @@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
40934 if (error)
40935 goto out_drop_write;
40936 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
40937 + if (!error)
40938 + gr_handle_create(dentry, nd.path.mnt);
40939 out_drop_write:
40940 mnt_drop_write(nd.path.mnt);
40941 out_dput:
40942 @@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
40943 error = PTR_ERR(new_dentry);
40944 if (IS_ERR(new_dentry))
40945 goto out_unlock;
40946 +
40947 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
40948 + old_path.dentry->d_inode,
40949 + old_path.dentry->d_inode->i_mode, to)) {
40950 + error = -EACCES;
40951 + goto out_dput;
40952 + }
40953 +
40954 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
40955 + old_path.dentry, old_path.mnt, to)) {
40956 + error = -EACCES;
40957 + goto out_dput;
40958 + }
40959 +
40960 error = mnt_want_write(nd.path.mnt);
40961 if (error)
40962 goto out_dput;
40963 @@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
40964 if (error)
40965 goto out_drop_write;
40966 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
40967 + if (!error)
40968 + gr_handle_create(new_dentry, nd.path.mnt);
40969 out_drop_write:
40970 mnt_drop_write(nd.path.mnt);
40971 out_dput:
40972 @@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40973 char *to;
40974 int error;
40975
40976 + pax_track_stack();
40977 +
40978 error = user_path_parent(olddfd, oldname, &oldnd, &from);
40979 if (error)
40980 goto exit;
40981 @@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40982 if (new_dentry == trap)
40983 goto exit5;
40984
40985 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
40986 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
40987 + to);
40988 + if (error)
40989 + goto exit5;
40990 +
40991 error = mnt_want_write(oldnd.path.mnt);
40992 if (error)
40993 goto exit5;
40994 @@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
40995 goto exit6;
40996 error = vfs_rename(old_dir->d_inode, old_dentry,
40997 new_dir->d_inode, new_dentry);
40998 + if (!error)
40999 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
41000 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
41001 exit6:
41002 mnt_drop_write(oldnd.path.mnt);
41003 exit5:
41004 @@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
41005
41006 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
41007 {
41008 + char tmpbuf[64];
41009 + const char *newlink;
41010 int len;
41011
41012 len = PTR_ERR(link);
41013 @@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
41014 len = strlen(link);
41015 if (len > (unsigned) buflen)
41016 len = buflen;
41017 - if (copy_to_user(buffer, link, len))
41018 +
41019 + if (len < sizeof(tmpbuf)) {
41020 + memcpy(tmpbuf, link, len);
41021 + newlink = tmpbuf;
41022 + } else
41023 + newlink = link;
41024 +
41025 + if (copy_to_user(buffer, newlink, len))
41026 len = -EFAULT;
41027 out:
41028 return len;
41029 diff -urNp linux-2.6.32.42/fs/namespace.c linux-2.6.32.42/fs/namespace.c
41030 --- linux-2.6.32.42/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
41031 +++ linux-2.6.32.42/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
41032 @@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
41033 if (!(sb->s_flags & MS_RDONLY))
41034 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
41035 up_write(&sb->s_umount);
41036 +
41037 + gr_log_remount(mnt->mnt_devname, retval);
41038 +
41039 return retval;
41040 }
41041
41042 @@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
41043 security_sb_umount_busy(mnt);
41044 up_write(&namespace_sem);
41045 release_mounts(&umount_list);
41046 +
41047 + gr_log_unmount(mnt->mnt_devname, retval);
41048 +
41049 return retval;
41050 }
41051
41052 @@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
41053 if (retval)
41054 goto dput_out;
41055
41056 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
41057 + retval = -EPERM;
41058 + goto dput_out;
41059 + }
41060 +
41061 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
41062 + retval = -EPERM;
41063 + goto dput_out;
41064 + }
41065 +
41066 if (flags & MS_REMOUNT)
41067 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
41068 data_page);
41069 @@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
41070 dev_name, data_page);
41071 dput_out:
41072 path_put(&path);
41073 +
41074 + gr_log_mount(dev_name, dir_name, retval);
41075 +
41076 return retval;
41077 }
41078
41079 @@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
41080 goto out1;
41081 }
41082
41083 + if (gr_handle_chroot_pivot()) {
41084 + error = -EPERM;
41085 + path_put(&old);
41086 + goto out1;
41087 + }
41088 +
41089 read_lock(&current->fs->lock);
41090 root = current->fs->root;
41091 path_get(&current->fs->root);
41092 diff -urNp linux-2.6.32.42/fs/ncpfs/dir.c linux-2.6.32.42/fs/ncpfs/dir.c
41093 --- linux-2.6.32.42/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41094 +++ linux-2.6.32.42/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
41095 @@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
41096 int res, val = 0, len;
41097 __u8 __name[NCP_MAXPATHLEN + 1];
41098
41099 + pax_track_stack();
41100 +
41101 parent = dget_parent(dentry);
41102 dir = parent->d_inode;
41103
41104 @@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
41105 int error, res, len;
41106 __u8 __name[NCP_MAXPATHLEN + 1];
41107
41108 + pax_track_stack();
41109 +
41110 lock_kernel();
41111 error = -EIO;
41112 if (!ncp_conn_valid(server))
41113 @@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
41114 int error, result, len;
41115 int opmode;
41116 __u8 __name[NCP_MAXPATHLEN + 1];
41117 -
41118 +
41119 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
41120 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
41121
41122 + pax_track_stack();
41123 +
41124 error = -EIO;
41125 lock_kernel();
41126 if (!ncp_conn_valid(server))
41127 @@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
41128 int error, len;
41129 __u8 __name[NCP_MAXPATHLEN + 1];
41130
41131 + pax_track_stack();
41132 +
41133 DPRINTK("ncp_mkdir: making %s/%s\n",
41134 dentry->d_parent->d_name.name, dentry->d_name.name);
41135
41136 @@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
41137 if (!ncp_conn_valid(server))
41138 goto out;
41139
41140 + pax_track_stack();
41141 +
41142 ncp_age_dentry(server, dentry);
41143 len = sizeof(__name);
41144 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
41145 @@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
41146 int old_len, new_len;
41147 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
41148
41149 + pax_track_stack();
41150 +
41151 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
41152 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
41153 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
41154 diff -urNp linux-2.6.32.42/fs/ncpfs/inode.c linux-2.6.32.42/fs/ncpfs/inode.c
41155 --- linux-2.6.32.42/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
41156 +++ linux-2.6.32.42/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
41157 @@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
41158 #endif
41159 struct ncp_entry_info finfo;
41160
41161 + pax_track_stack();
41162 +
41163 data.wdog_pid = NULL;
41164 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
41165 if (!server)
41166 diff -urNp linux-2.6.32.42/fs/nfs/inode.c linux-2.6.32.42/fs/nfs/inode.c
41167 --- linux-2.6.32.42/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
41168 +++ linux-2.6.32.42/fs/nfs/inode.c 2011-05-10 22:12:33.000000000 -0400
41169 @@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
41170 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
41171 }
41172
41173 -static atomic_long_t nfs_attr_generation_counter;
41174 +static atomic_long_unchecked_t nfs_attr_generation_counter;
41175
41176 static unsigned long nfs_read_attr_generation_counter(void)
41177 {
41178 - return atomic_long_read(&nfs_attr_generation_counter);
41179 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
41180 }
41181
41182 unsigned long nfs_inc_attr_generation_counter(void)
41183 {
41184 - return atomic_long_inc_return(&nfs_attr_generation_counter);
41185 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
41186 }
41187
41188 void nfs_fattr_init(struct nfs_fattr *fattr)
41189 diff -urNp linux-2.6.32.42/fs/nfsd/lockd.c linux-2.6.32.42/fs/nfsd/lockd.c
41190 --- linux-2.6.32.42/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
41191 +++ linux-2.6.32.42/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
41192 @@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
41193 fput(filp);
41194 }
41195
41196 -static struct nlmsvc_binding nfsd_nlm_ops = {
41197 +static const struct nlmsvc_binding nfsd_nlm_ops = {
41198 .fopen = nlm_fopen, /* open file for locking */
41199 .fclose = nlm_fclose, /* close file */
41200 };
41201 diff -urNp linux-2.6.32.42/fs/nfsd/nfs4state.c linux-2.6.32.42/fs/nfsd/nfs4state.c
41202 --- linux-2.6.32.42/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
41203 +++ linux-2.6.32.42/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
41204 @@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
41205 unsigned int cmd;
41206 int err;
41207
41208 + pax_track_stack();
41209 +
41210 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
41211 (long long) lock->lk_offset,
41212 (long long) lock->lk_length);
41213 diff -urNp linux-2.6.32.42/fs/nfsd/nfs4xdr.c linux-2.6.32.42/fs/nfsd/nfs4xdr.c
41214 --- linux-2.6.32.42/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
41215 +++ linux-2.6.32.42/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
41216 @@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
41217 struct nfsd4_compoundres *resp = rqstp->rq_resp;
41218 u32 minorversion = resp->cstate.minorversion;
41219
41220 + pax_track_stack();
41221 +
41222 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
41223 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
41224 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
41225 diff -urNp linux-2.6.32.42/fs/nfsd/vfs.c linux-2.6.32.42/fs/nfsd/vfs.c
41226 --- linux-2.6.32.42/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
41227 +++ linux-2.6.32.42/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
41228 @@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
41229 } else {
41230 oldfs = get_fs();
41231 set_fs(KERNEL_DS);
41232 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
41233 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
41234 set_fs(oldfs);
41235 }
41236
41237 @@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
41238
41239 /* Write the data. */
41240 oldfs = get_fs(); set_fs(KERNEL_DS);
41241 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
41242 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
41243 set_fs(oldfs);
41244 if (host_err < 0)
41245 goto out_nfserr;
41246 @@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
41247 */
41248
41249 oldfs = get_fs(); set_fs(KERNEL_DS);
41250 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
41251 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
41252 set_fs(oldfs);
41253
41254 if (host_err < 0)
41255 diff -urNp linux-2.6.32.42/fs/nilfs2/ioctl.c linux-2.6.32.42/fs/nilfs2/ioctl.c
41256 --- linux-2.6.32.42/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
41257 +++ linux-2.6.32.42/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
41258 @@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
41259 unsigned int cmd, void __user *argp)
41260 {
41261 struct nilfs_argv argv[5];
41262 - const static size_t argsz[5] = {
41263 + static const size_t argsz[5] = {
41264 sizeof(struct nilfs_vdesc),
41265 sizeof(struct nilfs_period),
41266 sizeof(__u64),
41267 diff -urNp linux-2.6.32.42/fs/notify/dnotify/dnotify.c linux-2.6.32.42/fs/notify/dnotify/dnotify.c
41268 --- linux-2.6.32.42/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
41269 +++ linux-2.6.32.42/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
41270 @@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
41271 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
41272 }
41273
41274 -static struct fsnotify_ops dnotify_fsnotify_ops = {
41275 +static const struct fsnotify_ops dnotify_fsnotify_ops = {
41276 .handle_event = dnotify_handle_event,
41277 .should_send_event = dnotify_should_send_event,
41278 .free_group_priv = NULL,
41279 diff -urNp linux-2.6.32.42/fs/notify/notification.c linux-2.6.32.42/fs/notify/notification.c
41280 --- linux-2.6.32.42/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
41281 +++ linux-2.6.32.42/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
41282 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
41283 * get set to 0 so it will never get 'freed'
41284 */
41285 static struct fsnotify_event q_overflow_event;
41286 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41287 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
41288
41289 /**
41290 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
41291 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
41292 */
41293 u32 fsnotify_get_cookie(void)
41294 {
41295 - return atomic_inc_return(&fsnotify_sync_cookie);
41296 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
41297 }
41298 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
41299
41300 diff -urNp linux-2.6.32.42/fs/ntfs/dir.c linux-2.6.32.42/fs/ntfs/dir.c
41301 --- linux-2.6.32.42/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41302 +++ linux-2.6.32.42/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
41303 @@ -1328,7 +1328,7 @@ find_next_index_buffer:
41304 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
41305 ~(s64)(ndir->itype.index.block_size - 1)));
41306 /* Bounds checks. */
41307 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41308 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
41309 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
41310 "inode 0x%lx or driver bug.", vdir->i_ino);
41311 goto err_out;
41312 diff -urNp linux-2.6.32.42/fs/ntfs/file.c linux-2.6.32.42/fs/ntfs/file.c
41313 --- linux-2.6.32.42/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
41314 +++ linux-2.6.32.42/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
41315 @@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
41316 #endif /* NTFS_RW */
41317 };
41318
41319 -const struct file_operations ntfs_empty_file_ops = {};
41320 +const struct file_operations ntfs_empty_file_ops __read_only;
41321
41322 -const struct inode_operations ntfs_empty_inode_ops = {};
41323 +const struct inode_operations ntfs_empty_inode_ops __read_only;
41324 diff -urNp linux-2.6.32.42/fs/ocfs2/cluster/masklog.c linux-2.6.32.42/fs/ocfs2/cluster/masklog.c
41325 --- linux-2.6.32.42/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
41326 +++ linux-2.6.32.42/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
41327 @@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
41328 return mlog_mask_store(mlog_attr->mask, buf, count);
41329 }
41330
41331 -static struct sysfs_ops mlog_attr_ops = {
41332 +static const struct sysfs_ops mlog_attr_ops = {
41333 .show = mlog_show,
41334 .store = mlog_store,
41335 };
41336 diff -urNp linux-2.6.32.42/fs/ocfs2/localalloc.c linux-2.6.32.42/fs/ocfs2/localalloc.c
41337 --- linux-2.6.32.42/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
41338 +++ linux-2.6.32.42/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
41339 @@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
41340 goto bail;
41341 }
41342
41343 - atomic_inc(&osb->alloc_stats.moves);
41344 + atomic_inc_unchecked(&osb->alloc_stats.moves);
41345
41346 status = 0;
41347 bail:
41348 diff -urNp linux-2.6.32.42/fs/ocfs2/namei.c linux-2.6.32.42/fs/ocfs2/namei.c
41349 --- linux-2.6.32.42/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
41350 +++ linux-2.6.32.42/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
41351 @@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
41352 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
41353 struct ocfs2_dir_lookup_result target_insert = { NULL, };
41354
41355 + pax_track_stack();
41356 +
41357 /* At some point it might be nice to break this function up a
41358 * bit. */
41359
41360 diff -urNp linux-2.6.32.42/fs/ocfs2/ocfs2.h linux-2.6.32.42/fs/ocfs2/ocfs2.h
41361 --- linux-2.6.32.42/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
41362 +++ linux-2.6.32.42/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
41363 @@ -217,11 +217,11 @@ enum ocfs2_vol_state
41364
41365 struct ocfs2_alloc_stats
41366 {
41367 - atomic_t moves;
41368 - atomic_t local_data;
41369 - atomic_t bitmap_data;
41370 - atomic_t bg_allocs;
41371 - atomic_t bg_extends;
41372 + atomic_unchecked_t moves;
41373 + atomic_unchecked_t local_data;
41374 + atomic_unchecked_t bitmap_data;
41375 + atomic_unchecked_t bg_allocs;
41376 + atomic_unchecked_t bg_extends;
41377 };
41378
41379 enum ocfs2_local_alloc_state
41380 diff -urNp linux-2.6.32.42/fs/ocfs2/suballoc.c linux-2.6.32.42/fs/ocfs2/suballoc.c
41381 --- linux-2.6.32.42/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
41382 +++ linux-2.6.32.42/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
41383 @@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
41384 mlog_errno(status);
41385 goto bail;
41386 }
41387 - atomic_inc(&osb->alloc_stats.bg_extends);
41388 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
41389
41390 /* You should never ask for this much metadata */
41391 BUG_ON(bits_wanted >
41392 @@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
41393 mlog_errno(status);
41394 goto bail;
41395 }
41396 - atomic_inc(&osb->alloc_stats.bg_allocs);
41397 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
41398
41399 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
41400 ac->ac_bits_given += (*num_bits);
41401 @@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
41402 mlog_errno(status);
41403 goto bail;
41404 }
41405 - atomic_inc(&osb->alloc_stats.bg_allocs);
41406 + atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
41407
41408 BUG_ON(num_bits != 1);
41409
41410 @@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
41411 cluster_start,
41412 num_clusters);
41413 if (!status)
41414 - atomic_inc(&osb->alloc_stats.local_data);
41415 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
41416 } else {
41417 if (min_clusters > (osb->bitmap_cpg - 1)) {
41418 /* The only paths asking for contiguousness
41419 @@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
41420 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
41421 bg_blkno,
41422 bg_bit_off);
41423 - atomic_inc(&osb->alloc_stats.bitmap_data);
41424 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
41425 }
41426 }
41427 if (status < 0) {
41428 diff -urNp linux-2.6.32.42/fs/ocfs2/super.c linux-2.6.32.42/fs/ocfs2/super.c
41429 --- linux-2.6.32.42/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
41430 +++ linux-2.6.32.42/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
41431 @@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
41432 "%10s => GlobalAllocs: %d LocalAllocs: %d "
41433 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
41434 "Stats",
41435 - atomic_read(&osb->alloc_stats.bitmap_data),
41436 - atomic_read(&osb->alloc_stats.local_data),
41437 - atomic_read(&osb->alloc_stats.bg_allocs),
41438 - atomic_read(&osb->alloc_stats.moves),
41439 - atomic_read(&osb->alloc_stats.bg_extends));
41440 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
41441 + atomic_read_unchecked(&osb->alloc_stats.local_data),
41442 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
41443 + atomic_read_unchecked(&osb->alloc_stats.moves),
41444 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
41445
41446 out += snprintf(buf + out, len - out,
41447 "%10s => State: %u Descriptor: %llu Size: %u bits "
41448 @@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
41449 spin_lock_init(&osb->osb_xattr_lock);
41450 ocfs2_init_inode_steal_slot(osb);
41451
41452 - atomic_set(&osb->alloc_stats.moves, 0);
41453 - atomic_set(&osb->alloc_stats.local_data, 0);
41454 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
41455 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
41456 - atomic_set(&osb->alloc_stats.bg_extends, 0);
41457 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
41458 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
41459 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
41460 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
41461 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
41462
41463 /* Copy the blockcheck stats from the superblock probe */
41464 osb->osb_ecc_stats = *stats;
41465 diff -urNp linux-2.6.32.42/fs/open.c linux-2.6.32.42/fs/open.c
41466 --- linux-2.6.32.42/fs/open.c 2011-03-27 14:31:47.000000000 -0400
41467 +++ linux-2.6.32.42/fs/open.c 2011-04-17 15:56:46.000000000 -0400
41468 @@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
41469 error = locks_verify_truncate(inode, NULL, length);
41470 if (!error)
41471 error = security_path_truncate(&path, length, 0);
41472 +
41473 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
41474 + error = -EACCES;
41475 +
41476 if (!error) {
41477 vfs_dq_init(inode);
41478 error = do_truncate(path.dentry, length, 0, NULL);
41479 @@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
41480 if (__mnt_is_readonly(path.mnt))
41481 res = -EROFS;
41482
41483 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
41484 + res = -EACCES;
41485 +
41486 out_path_release:
41487 path_put(&path);
41488 out:
41489 @@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
41490 if (error)
41491 goto dput_and_out;
41492
41493 + gr_log_chdir(path.dentry, path.mnt);
41494 +
41495 set_fs_pwd(current->fs, &path);
41496
41497 dput_and_out:
41498 @@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
41499 goto out_putf;
41500
41501 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
41502 +
41503 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
41504 + error = -EPERM;
41505 +
41506 + if (!error)
41507 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
41508 +
41509 if (!error)
41510 set_fs_pwd(current->fs, &file->f_path);
41511 out_putf:
41512 @@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
41513 if (!capable(CAP_SYS_CHROOT))
41514 goto dput_and_out;
41515
41516 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
41517 + goto dput_and_out;
41518 +
41519 + if (gr_handle_chroot_caps(&path)) {
41520 + error = -ENOMEM;
41521 + goto dput_and_out;
41522 + }
41523 +
41524 set_fs_root(current->fs, &path);
41525 +
41526 + gr_handle_chroot_chdir(&path);
41527 +
41528 error = 0;
41529 dput_and_out:
41530 path_put(&path);
41531 @@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
41532 err = mnt_want_write_file(file);
41533 if (err)
41534 goto out_putf;
41535 +
41536 mutex_lock(&inode->i_mutex);
41537 +
41538 + if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
41539 + err = -EACCES;
41540 + goto out_unlock;
41541 + }
41542 +
41543 if (mode == (mode_t) -1)
41544 mode = inode->i_mode;
41545 +
41546 + if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
41547 + err = -EPERM;
41548 + goto out_unlock;
41549 + }
41550 +
41551 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
41552 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
41553 err = notify_change(dentry, &newattrs);
41554 +
41555 +out_unlock:
41556 mutex_unlock(&inode->i_mutex);
41557 mnt_drop_write(file->f_path.mnt);
41558 out_putf:
41559 @@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
41560 error = mnt_want_write(path.mnt);
41561 if (error)
41562 goto dput_and_out;
41563 +
41564 mutex_lock(&inode->i_mutex);
41565 +
41566 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
41567 + error = -EACCES;
41568 + goto out_unlock;
41569 + }
41570 +
41571 if (mode == (mode_t) -1)
41572 mode = inode->i_mode;
41573 +
41574 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
41575 + error = -EACCES;
41576 + goto out_unlock;
41577 + }
41578 +
41579 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
41580 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
41581 error = notify_change(path.dentry, &newattrs);
41582 +
41583 +out_unlock:
41584 mutex_unlock(&inode->i_mutex);
41585 mnt_drop_write(path.mnt);
41586 dput_and_out:
41587 @@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
41588 return sys_fchmodat(AT_FDCWD, filename, mode);
41589 }
41590
41591 -static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
41592 +static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
41593 {
41594 struct inode *inode = dentry->d_inode;
41595 int error;
41596 struct iattr newattrs;
41597
41598 + if (!gr_acl_handle_chown(dentry, mnt))
41599 + return -EACCES;
41600 +
41601 newattrs.ia_valid = ATTR_CTIME;
41602 if (user != (uid_t) -1) {
41603 newattrs.ia_valid |= ATTR_UID;
41604 @@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
41605 error = mnt_want_write(path.mnt);
41606 if (error)
41607 goto out_release;
41608 - error = chown_common(path.dentry, user, group);
41609 + error = chown_common(path.dentry, user, group, path.mnt);
41610 mnt_drop_write(path.mnt);
41611 out_release:
41612 path_put(&path);
41613 @@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
41614 error = mnt_want_write(path.mnt);
41615 if (error)
41616 goto out_release;
41617 - error = chown_common(path.dentry, user, group);
41618 + error = chown_common(path.dentry, user, group, path.mnt);
41619 mnt_drop_write(path.mnt);
41620 out_release:
41621 path_put(&path);
41622 @@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
41623 error = mnt_want_write(path.mnt);
41624 if (error)
41625 goto out_release;
41626 - error = chown_common(path.dentry, user, group);
41627 + error = chown_common(path.dentry, user, group, path.mnt);
41628 mnt_drop_write(path.mnt);
41629 out_release:
41630 path_put(&path);
41631 @@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
41632 goto out_fput;
41633 dentry = file->f_path.dentry;
41634 audit_inode(NULL, dentry);
41635 - error = chown_common(dentry, user, group);
41636 + error = chown_common(dentry, user, group, file->f_path.mnt);
41637 mnt_drop_write(file->f_path.mnt);
41638 out_fput:
41639 fput(file);
41640 @@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
41641 if (!IS_ERR(tmp)) {
41642 fd = get_unused_fd_flags(flags);
41643 if (fd >= 0) {
41644 - struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
41645 + struct file *f;
41646 + /* don't allow to be set by userland */
41647 + flags &= ~FMODE_GREXEC;
41648 + f = do_filp_open(dfd, tmp, flags, mode, 0);
41649 if (IS_ERR(f)) {
41650 put_unused_fd(fd);
41651 fd = PTR_ERR(f);
41652 diff -urNp linux-2.6.32.42/fs/partitions/ldm.c linux-2.6.32.42/fs/partitions/ldm.c
41653 --- linux-2.6.32.42/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
41654 +++ linux-2.6.32.42/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
41655 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
41656 ldm_error ("A VBLK claims to have %d parts.", num);
41657 return false;
41658 }
41659 +
41660 if (rec >= num) {
41661 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
41662 return false;
41663 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
41664 goto found;
41665 }
41666
41667 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
41668 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
41669 if (!f) {
41670 ldm_crit ("Out of memory.");
41671 return false;
41672 diff -urNp linux-2.6.32.42/fs/partitions/mac.c linux-2.6.32.42/fs/partitions/mac.c
41673 --- linux-2.6.32.42/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
41674 +++ linux-2.6.32.42/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
41675 @@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
41676 return 0; /* not a MacOS disk */
41677 }
41678 blocks_in_map = be32_to_cpu(part->map_count);
41679 + printk(" [mac]");
41680 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
41681 put_dev_sector(sect);
41682 return 0;
41683 }
41684 - printk(" [mac]");
41685 for (slot = 1; slot <= blocks_in_map; ++slot) {
41686 int pos = slot * secsize;
41687 put_dev_sector(sect);
41688 diff -urNp linux-2.6.32.42/fs/pipe.c linux-2.6.32.42/fs/pipe.c
41689 --- linux-2.6.32.42/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
41690 +++ linux-2.6.32.42/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
41691 @@ -401,9 +401,9 @@ redo:
41692 }
41693 if (bufs) /* More to do? */
41694 continue;
41695 - if (!pipe->writers)
41696 + if (!atomic_read(&pipe->writers))
41697 break;
41698 - if (!pipe->waiting_writers) {
41699 + if (!atomic_read(&pipe->waiting_writers)) {
41700 /* syscall merging: Usually we must not sleep
41701 * if O_NONBLOCK is set, or if we got some data.
41702 * But if a writer sleeps in kernel space, then
41703 @@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
41704 mutex_lock(&inode->i_mutex);
41705 pipe = inode->i_pipe;
41706
41707 - if (!pipe->readers) {
41708 + if (!atomic_read(&pipe->readers)) {
41709 send_sig(SIGPIPE, current, 0);
41710 ret = -EPIPE;
41711 goto out;
41712 @@ -511,7 +511,7 @@ redo1:
41713 for (;;) {
41714 int bufs;
41715
41716 - if (!pipe->readers) {
41717 + if (!atomic_read(&pipe->readers)) {
41718 send_sig(SIGPIPE, current, 0);
41719 if (!ret)
41720 ret = -EPIPE;
41721 @@ -597,9 +597,9 @@ redo2:
41722 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41723 do_wakeup = 0;
41724 }
41725 - pipe->waiting_writers++;
41726 + atomic_inc(&pipe->waiting_writers);
41727 pipe_wait(pipe);
41728 - pipe->waiting_writers--;
41729 + atomic_dec(&pipe->waiting_writers);
41730 }
41731 out:
41732 mutex_unlock(&inode->i_mutex);
41733 @@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
41734 mask = 0;
41735 if (filp->f_mode & FMODE_READ) {
41736 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
41737 - if (!pipe->writers && filp->f_version != pipe->w_counter)
41738 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
41739 mask |= POLLHUP;
41740 }
41741
41742 @@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
41743 * Most Unices do not set POLLERR for FIFOs but on Linux they
41744 * behave exactly like pipes for poll().
41745 */
41746 - if (!pipe->readers)
41747 + if (!atomic_read(&pipe->readers))
41748 mask |= POLLERR;
41749 }
41750
41751 @@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
41752
41753 mutex_lock(&inode->i_mutex);
41754 pipe = inode->i_pipe;
41755 - pipe->readers -= decr;
41756 - pipe->writers -= decw;
41757 + atomic_sub(decr, &pipe->readers);
41758 + atomic_sub(decw, &pipe->writers);
41759
41760 - if (!pipe->readers && !pipe->writers) {
41761 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
41762 free_pipe_info(inode);
41763 } else {
41764 wake_up_interruptible_sync(&pipe->wait);
41765 @@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
41766
41767 if (inode->i_pipe) {
41768 ret = 0;
41769 - inode->i_pipe->readers++;
41770 + atomic_inc(&inode->i_pipe->readers);
41771 }
41772
41773 mutex_unlock(&inode->i_mutex);
41774 @@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
41775
41776 if (inode->i_pipe) {
41777 ret = 0;
41778 - inode->i_pipe->writers++;
41779 + atomic_inc(&inode->i_pipe->writers);
41780 }
41781
41782 mutex_unlock(&inode->i_mutex);
41783 @@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
41784 if (inode->i_pipe) {
41785 ret = 0;
41786 if (filp->f_mode & FMODE_READ)
41787 - inode->i_pipe->readers++;
41788 + atomic_inc(&inode->i_pipe->readers);
41789 if (filp->f_mode & FMODE_WRITE)
41790 - inode->i_pipe->writers++;
41791 + atomic_inc(&inode->i_pipe->writers);
41792 }
41793
41794 mutex_unlock(&inode->i_mutex);
41795 @@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
41796 inode->i_pipe = NULL;
41797 }
41798
41799 -static struct vfsmount *pipe_mnt __read_mostly;
41800 +struct vfsmount *pipe_mnt __read_mostly;
41801 static int pipefs_delete_dentry(struct dentry *dentry)
41802 {
41803 /*
41804 @@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
41805 goto fail_iput;
41806 inode->i_pipe = pipe;
41807
41808 - pipe->readers = pipe->writers = 1;
41809 + atomic_set(&pipe->readers, 1);
41810 + atomic_set(&pipe->writers, 1);
41811 inode->i_fop = &rdwr_pipefifo_fops;
41812
41813 /*
41814 diff -urNp linux-2.6.32.42/fs/proc/array.c linux-2.6.32.42/fs/proc/array.c
41815 --- linux-2.6.32.42/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
41816 +++ linux-2.6.32.42/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
41817 @@ -60,6 +60,7 @@
41818 #include <linux/tty.h>
41819 #include <linux/string.h>
41820 #include <linux/mman.h>
41821 +#include <linux/grsecurity.h>
41822 #include <linux/proc_fs.h>
41823 #include <linux/ioport.h>
41824 #include <linux/uaccess.h>
41825 @@ -321,6 +322,21 @@ static inline void task_context_switch_c
41826 p->nivcsw);
41827 }
41828
41829 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41830 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
41831 +{
41832 + if (p->mm)
41833 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
41834 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
41835 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
41836 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
41837 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
41838 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
41839 + else
41840 + seq_printf(m, "PaX:\t-----\n");
41841 +}
41842 +#endif
41843 +
41844 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
41845 struct pid *pid, struct task_struct *task)
41846 {
41847 @@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
41848 task_cap(m, task);
41849 cpuset_task_status_allowed(m, task);
41850 task_context_switch_counts(m, task);
41851 +
41852 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
41853 + task_pax(m, task);
41854 +#endif
41855 +
41856 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
41857 + task_grsec_rbac(m, task);
41858 +#endif
41859 +
41860 return 0;
41861 }
41862
41863 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41864 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
41865 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
41866 + _mm->pax_flags & MF_PAX_SEGMEXEC))
41867 +#endif
41868 +
41869 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
41870 struct pid *pid, struct task_struct *task, int whole)
41871 {
41872 @@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
41873 cputime_t cutime, cstime, utime, stime;
41874 cputime_t cgtime, gtime;
41875 unsigned long rsslim = 0;
41876 - char tcomm[sizeof(task->comm)];
41877 + char tcomm[sizeof(task->comm)] = { 0 };
41878 unsigned long flags;
41879
41880 + pax_track_stack();
41881 +
41882 state = *get_task_state(task);
41883 vsize = eip = esp = 0;
41884 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
41885 @@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
41886 gtime = task_gtime(task);
41887 }
41888
41889 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41890 + if (PAX_RAND_FLAGS(mm)) {
41891 + eip = 0;
41892 + esp = 0;
41893 + wchan = 0;
41894 + }
41895 +#endif
41896 +#ifdef CONFIG_GRKERNSEC_HIDESYM
41897 + wchan = 0;
41898 + eip =0;
41899 + esp =0;
41900 +#endif
41901 +
41902 /* scale priority and nice values from timeslices to -20..20 */
41903 /* to make it look like a "normal" Unix priority/nice value */
41904 priority = task_prio(task);
41905 @@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
41906 vsize,
41907 mm ? get_mm_rss(mm) : 0,
41908 rsslim,
41909 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41910 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
41911 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
41912 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
41913 +#else
41914 mm ? (permitted ? mm->start_code : 1) : 0,
41915 mm ? (permitted ? mm->end_code : 1) : 0,
41916 (permitted && mm) ? mm->start_stack : 0,
41917 +#endif
41918 esp,
41919 eip,
41920 /* The signal information here is obsolete.
41921 @@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
41922
41923 return 0;
41924 }
41925 +
41926 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
41927 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
41928 +{
41929 + u32 curr_ip = 0;
41930 + unsigned long flags;
41931 +
41932 + if (lock_task_sighand(task, &flags)) {
41933 + curr_ip = task->signal->curr_ip;
41934 + unlock_task_sighand(task, &flags);
41935 + }
41936 +
41937 + return sprintf(buffer, "%pI4\n", &curr_ip);
41938 +}
41939 +#endif
41940 diff -urNp linux-2.6.32.42/fs/proc/base.c linux-2.6.32.42/fs/proc/base.c
41941 --- linux-2.6.32.42/fs/proc/base.c 2011-04-22 19:16:29.000000000 -0400
41942 +++ linux-2.6.32.42/fs/proc/base.c 2011-06-04 21:20:50.000000000 -0400
41943 @@ -102,6 +102,22 @@ struct pid_entry {
41944 union proc_op op;
41945 };
41946
41947 +struct getdents_callback {
41948 + struct linux_dirent __user * current_dir;
41949 + struct linux_dirent __user * previous;
41950 + struct file * file;
41951 + int count;
41952 + int error;
41953 +};
41954 +
41955 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
41956 + loff_t offset, u64 ino, unsigned int d_type)
41957 +{
41958 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
41959 + buf->error = -EINVAL;
41960 + return 0;
41961 +}
41962 +
41963 #define NOD(NAME, MODE, IOP, FOP, OP) { \
41964 .name = (NAME), \
41965 .len = sizeof(NAME) - 1, \
41966 @@ -213,6 +229,9 @@ static int check_mem_permission(struct t
41967 if (task == current)
41968 return 0;
41969
41970 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
41971 + return -EPERM;
41972 +
41973 /*
41974 * If current is actively ptrace'ing, and would also be
41975 * permitted to freshly attach with ptrace now, permit it.
41976 @@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
41977 if (!mm->arg_end)
41978 goto out_mm; /* Shh! No looking before we're done */
41979
41980 + if (gr_acl_handle_procpidmem(task))
41981 + goto out_mm;
41982 +
41983 len = mm->arg_end - mm->arg_start;
41984
41985 if (len > PAGE_SIZE)
41986 @@ -287,12 +309,28 @@ out:
41987 return res;
41988 }
41989
41990 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
41991 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
41992 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
41993 + _mm->pax_flags & MF_PAX_SEGMEXEC))
41994 +#endif
41995 +
41996 static int proc_pid_auxv(struct task_struct *task, char *buffer)
41997 {
41998 int res = 0;
41999 struct mm_struct *mm = get_task_mm(task);
42000 if (mm) {
42001 unsigned int nwords = 0;
42002 +
42003 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42004 + /* allow if we're currently ptracing this task */
42005 + if (PAX_RAND_FLAGS(mm) &&
42006 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
42007 + mmput(mm);
42008 + return res;
42009 + }
42010 +#endif
42011 +
42012 do {
42013 nwords += 2;
42014 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
42015 @@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
42016 }
42017
42018
42019 -#ifdef CONFIG_KALLSYMS
42020 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42021 /*
42022 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
42023 * Returns the resolved symbol. If that fails, simply return the address.
42024 @@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
42025 }
42026 #endif /* CONFIG_KALLSYMS */
42027
42028 -#ifdef CONFIG_STACKTRACE
42029 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42030
42031 #define MAX_STACK_TRACE_DEPTH 64
42032
42033 @@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
42034 return count;
42035 }
42036
42037 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42038 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42039 static int proc_pid_syscall(struct task_struct *task, char *buffer)
42040 {
42041 long nr;
42042 @@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
42043 /************************************************************************/
42044
42045 /* permission checks */
42046 -static int proc_fd_access_allowed(struct inode *inode)
42047 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
42048 {
42049 struct task_struct *task;
42050 int allowed = 0;
42051 @@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
42052 */
42053 task = get_proc_task(inode);
42054 if (task) {
42055 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42056 + if (log)
42057 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
42058 + else
42059 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42060 put_task_struct(task);
42061 }
42062 return allowed;
42063 @@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
42064 if (!task)
42065 goto out_no_task;
42066
42067 + if (gr_acl_handle_procpidmem(task))
42068 + goto out;
42069 +
42070 if (!ptrace_may_access(task, PTRACE_MODE_READ))
42071 goto out;
42072
42073 @@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
42074 path_put(&nd->path);
42075
42076 /* Are we allowed to snoop on the tasks file descriptors? */
42077 - if (!proc_fd_access_allowed(inode))
42078 + if (!proc_fd_access_allowed(inode,0))
42079 goto out;
42080
42081 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
42082 @@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
42083 struct path path;
42084
42085 /* Are we allowed to snoop on the tasks file descriptors? */
42086 - if (!proc_fd_access_allowed(inode))
42087 - goto out;
42088 + /* logging this is needed for learning on chromium to work properly,
42089 + but we don't want to flood the logs from 'ps' which does a readlink
42090 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
42091 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
42092 + */
42093 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
42094 + if (!proc_fd_access_allowed(inode,0))
42095 + goto out;
42096 + } else {
42097 + if (!proc_fd_access_allowed(inode,1))
42098 + goto out;
42099 + }
42100
42101 error = PROC_I(inode)->op.proc_get_link(inode, &path);
42102 if (error)
42103 @@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
42104 rcu_read_lock();
42105 cred = __task_cred(task);
42106 inode->i_uid = cred->euid;
42107 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42108 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42109 +#else
42110 inode->i_gid = cred->egid;
42111 +#endif
42112 rcu_read_unlock();
42113 }
42114 security_task_to_inode(task, inode);
42115 @@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
42116 struct inode *inode = dentry->d_inode;
42117 struct task_struct *task;
42118 const struct cred *cred;
42119 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42120 + const struct cred *tmpcred = current_cred();
42121 +#endif
42122
42123 generic_fillattr(inode, stat);
42124
42125 @@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
42126 stat->uid = 0;
42127 stat->gid = 0;
42128 task = pid_task(proc_pid(inode), PIDTYPE_PID);
42129 +
42130 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
42131 + rcu_read_unlock();
42132 + return -ENOENT;
42133 + }
42134 +
42135 if (task) {
42136 + cred = __task_cred(task);
42137 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42138 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
42139 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42140 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42141 +#endif
42142 + ) {
42143 +#endif
42144 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42145 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42146 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42147 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42148 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42149 +#endif
42150 task_dumpable(task)) {
42151 - cred = __task_cred(task);
42152 stat->uid = cred->euid;
42153 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42154 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
42155 +#else
42156 stat->gid = cred->egid;
42157 +#endif
42158 }
42159 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42160 + } else {
42161 + rcu_read_unlock();
42162 + return -ENOENT;
42163 + }
42164 +#endif
42165 }
42166 rcu_read_unlock();
42167 return 0;
42168 @@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
42169
42170 if (task) {
42171 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42172 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42173 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42174 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42175 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42176 +#endif
42177 task_dumpable(task)) {
42178 rcu_read_lock();
42179 cred = __task_cred(task);
42180 inode->i_uid = cred->euid;
42181 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42182 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42183 +#else
42184 inode->i_gid = cred->egid;
42185 +#endif
42186 rcu_read_unlock();
42187 } else {
42188 inode->i_uid = 0;
42189 @@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
42190 int fd = proc_fd(inode);
42191
42192 if (task) {
42193 - files = get_files_struct(task);
42194 + if (!gr_acl_handle_procpidmem(task))
42195 + files = get_files_struct(task);
42196 put_task_struct(task);
42197 }
42198 if (files) {
42199 @@ -1895,12 +1994,22 @@ static const struct file_operations proc
42200 static int proc_fd_permission(struct inode *inode, int mask)
42201 {
42202 int rv;
42203 + struct task_struct *task;
42204
42205 rv = generic_permission(inode, mask, NULL);
42206 - if (rv == 0)
42207 - return 0;
42208 +
42209 if (task_pid(current) == proc_pid(inode))
42210 rv = 0;
42211 +
42212 + task = get_proc_task(inode);
42213 + if (task == NULL)
42214 + return rv;
42215 +
42216 + if (gr_acl_handle_procpidmem(task))
42217 + rv = -EACCES;
42218 +
42219 + put_task_struct(task);
42220 +
42221 return rv;
42222 }
42223
42224 @@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
42225 if (!task)
42226 goto out_no_task;
42227
42228 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42229 + goto out;
42230 +
42231 /*
42232 * Yes, it does not scale. And it should not. Don't add
42233 * new entries into /proc/<tgid>/ without very good reasons.
42234 @@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
42235 if (!task)
42236 goto out_no_task;
42237
42238 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42239 + goto out;
42240 +
42241 ret = 0;
42242 i = filp->f_pos;
42243 switch (i) {
42244 @@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
42245 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
42246 void *cookie)
42247 {
42248 - char *s = nd_get_link(nd);
42249 + const char *s = nd_get_link(nd);
42250 if (!IS_ERR(s))
42251 __putname(s);
42252 }
42253 @@ -2519,7 +2634,7 @@ static const struct pid_entry tgid_base_
42254 #ifdef CONFIG_SCHED_DEBUG
42255 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42256 #endif
42257 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42258 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42259 INF("syscall", S_IRUSR, proc_pid_syscall),
42260 #endif
42261 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42262 @@ -2544,10 +2659,10 @@ static const struct pid_entry tgid_base_
42263 #ifdef CONFIG_SECURITY
42264 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42265 #endif
42266 -#ifdef CONFIG_KALLSYMS
42267 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42268 INF("wchan", S_IRUGO, proc_pid_wchan),
42269 #endif
42270 -#ifdef CONFIG_STACKTRACE
42271 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42272 ONE("stack", S_IRUSR, proc_pid_stack),
42273 #endif
42274 #ifdef CONFIG_SCHEDSTATS
42275 @@ -2577,6 +2692,9 @@ static const struct pid_entry tgid_base_
42276 #ifdef CONFIG_TASK_IO_ACCOUNTING
42277 INF("io", S_IRUGO, proc_tgid_io_accounting),
42278 #endif
42279 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42280 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
42281 +#endif
42282 };
42283
42284 static int proc_tgid_base_readdir(struct file * filp,
42285 @@ -2701,7 +2819,14 @@ static struct dentry *proc_pid_instantia
42286 if (!inode)
42287 goto out;
42288
42289 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42290 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
42291 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42292 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42293 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
42294 +#else
42295 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
42296 +#endif
42297 inode->i_op = &proc_tgid_base_inode_operations;
42298 inode->i_fop = &proc_tgid_base_operations;
42299 inode->i_flags|=S_IMMUTABLE;
42300 @@ -2743,7 +2868,11 @@ struct dentry *proc_pid_lookup(struct in
42301 if (!task)
42302 goto out;
42303
42304 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42305 + goto out_put_task;
42306 +
42307 result = proc_pid_instantiate(dir, dentry, task, NULL);
42308 +out_put_task:
42309 put_task_struct(task);
42310 out:
42311 return result;
42312 @@ -2808,6 +2937,11 @@ int proc_pid_readdir(struct file * filp,
42313 {
42314 unsigned int nr;
42315 struct task_struct *reaper;
42316 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42317 + const struct cred *tmpcred = current_cred();
42318 + const struct cred *itercred;
42319 +#endif
42320 + filldir_t __filldir = filldir;
42321 struct tgid_iter iter;
42322 struct pid_namespace *ns;
42323
42324 @@ -2831,8 +2965,27 @@ int proc_pid_readdir(struct file * filp,
42325 for (iter = next_tgid(ns, iter);
42326 iter.task;
42327 iter.tgid += 1, iter = next_tgid(ns, iter)) {
42328 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42329 + rcu_read_lock();
42330 + itercred = __task_cred(iter.task);
42331 +#endif
42332 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
42333 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42334 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
42335 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42336 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42337 +#endif
42338 + )
42339 +#endif
42340 + )
42341 + __filldir = &gr_fake_filldir;
42342 + else
42343 + __filldir = filldir;
42344 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42345 + rcu_read_unlock();
42346 +#endif
42347 filp->f_pos = iter.tgid + TGID_OFFSET;
42348 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
42349 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
42350 put_task_struct(iter.task);
42351 goto out;
42352 }
42353 @@ -2858,7 +3011,7 @@ static const struct pid_entry tid_base_s
42354 #ifdef CONFIG_SCHED_DEBUG
42355 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42356 #endif
42357 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42358 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42359 INF("syscall", S_IRUSR, proc_pid_syscall),
42360 #endif
42361 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42362 @@ -2882,10 +3035,10 @@ static const struct pid_entry tid_base_s
42363 #ifdef CONFIG_SECURITY
42364 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
42365 #endif
42366 -#ifdef CONFIG_KALLSYMS
42367 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42368 INF("wchan", S_IRUGO, proc_pid_wchan),
42369 #endif
42370 -#ifdef CONFIG_STACKTRACE
42371 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42372 ONE("stack", S_IRUSR, proc_pid_stack),
42373 #endif
42374 #ifdef CONFIG_SCHEDSTATS
42375 diff -urNp linux-2.6.32.42/fs/proc/cmdline.c linux-2.6.32.42/fs/proc/cmdline.c
42376 --- linux-2.6.32.42/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
42377 +++ linux-2.6.32.42/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
42378 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
42379
42380 static int __init proc_cmdline_init(void)
42381 {
42382 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42383 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
42384 +#else
42385 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
42386 +#endif
42387 return 0;
42388 }
42389 module_init(proc_cmdline_init);
42390 diff -urNp linux-2.6.32.42/fs/proc/devices.c linux-2.6.32.42/fs/proc/devices.c
42391 --- linux-2.6.32.42/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
42392 +++ linux-2.6.32.42/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
42393 @@ -64,7 +64,11 @@ static const struct file_operations proc
42394
42395 static int __init proc_devices_init(void)
42396 {
42397 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42398 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
42399 +#else
42400 proc_create("devices", 0, NULL, &proc_devinfo_operations);
42401 +#endif
42402 return 0;
42403 }
42404 module_init(proc_devices_init);
42405 diff -urNp linux-2.6.32.42/fs/proc/inode.c linux-2.6.32.42/fs/proc/inode.c
42406 --- linux-2.6.32.42/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
42407 +++ linux-2.6.32.42/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
42408 @@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
42409 if (de->mode) {
42410 inode->i_mode = de->mode;
42411 inode->i_uid = de->uid;
42412 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42413 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42414 +#else
42415 inode->i_gid = de->gid;
42416 +#endif
42417 }
42418 if (de->size)
42419 inode->i_size = de->size;
42420 diff -urNp linux-2.6.32.42/fs/proc/internal.h linux-2.6.32.42/fs/proc/internal.h
42421 --- linux-2.6.32.42/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
42422 +++ linux-2.6.32.42/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
42423 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
42424 struct pid *pid, struct task_struct *task);
42425 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
42426 struct pid *pid, struct task_struct *task);
42427 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42428 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
42429 +#endif
42430 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
42431
42432 extern const struct file_operations proc_maps_operations;
42433 diff -urNp linux-2.6.32.42/fs/proc/Kconfig linux-2.6.32.42/fs/proc/Kconfig
42434 --- linux-2.6.32.42/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
42435 +++ linux-2.6.32.42/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
42436 @@ -30,12 +30,12 @@ config PROC_FS
42437
42438 config PROC_KCORE
42439 bool "/proc/kcore support" if !ARM
42440 - depends on PROC_FS && MMU
42441 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
42442
42443 config PROC_VMCORE
42444 bool "/proc/vmcore support (EXPERIMENTAL)"
42445 - depends on PROC_FS && CRASH_DUMP
42446 - default y
42447 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
42448 + default n
42449 help
42450 Exports the dump image of crashed kernel in ELF format.
42451
42452 @@ -59,8 +59,8 @@ config PROC_SYSCTL
42453 limited in memory.
42454
42455 config PROC_PAGE_MONITOR
42456 - default y
42457 - depends on PROC_FS && MMU
42458 + default n
42459 + depends on PROC_FS && MMU && !GRKERNSEC
42460 bool "Enable /proc page monitoring" if EMBEDDED
42461 help
42462 Various /proc files exist to monitor process memory utilization:
42463 diff -urNp linux-2.6.32.42/fs/proc/kcore.c linux-2.6.32.42/fs/proc/kcore.c
42464 --- linux-2.6.32.42/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
42465 +++ linux-2.6.32.42/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
42466 @@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
42467 off_t offset = 0;
42468 struct kcore_list *m;
42469
42470 + pax_track_stack();
42471 +
42472 /* setup ELF header */
42473 elf = (struct elfhdr *) bufp;
42474 bufp += sizeof(struct elfhdr);
42475 @@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
42476 * the addresses in the elf_phdr on our list.
42477 */
42478 start = kc_offset_to_vaddr(*fpos - elf_buflen);
42479 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
42480 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
42481 + if (tsz > buflen)
42482 tsz = buflen;
42483 -
42484 +
42485 while (buflen) {
42486 struct kcore_list *m;
42487
42488 @@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
42489 kfree(elf_buf);
42490 } else {
42491 if (kern_addr_valid(start)) {
42492 - unsigned long n;
42493 + char *elf_buf;
42494 + mm_segment_t oldfs;
42495
42496 - n = copy_to_user(buffer, (char *)start, tsz);
42497 - /*
42498 - * We cannot distingush between fault on source
42499 - * and fault on destination. When this happens
42500 - * we clear too and hope it will trigger the
42501 - * EFAULT again.
42502 - */
42503 - if (n) {
42504 - if (clear_user(buffer + tsz - n,
42505 - n))
42506 + elf_buf = kmalloc(tsz, GFP_KERNEL);
42507 + if (!elf_buf)
42508 + return -ENOMEM;
42509 + oldfs = get_fs();
42510 + set_fs(KERNEL_DS);
42511 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
42512 + set_fs(oldfs);
42513 + if (copy_to_user(buffer, elf_buf, tsz)) {
42514 + kfree(elf_buf);
42515 return -EFAULT;
42516 + }
42517 }
42518 + set_fs(oldfs);
42519 + kfree(elf_buf);
42520 } else {
42521 if (clear_user(buffer, tsz))
42522 return -EFAULT;
42523 @@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
42524
42525 static int open_kcore(struct inode *inode, struct file *filp)
42526 {
42527 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
42528 + return -EPERM;
42529 +#endif
42530 if (!capable(CAP_SYS_RAWIO))
42531 return -EPERM;
42532 if (kcore_need_update)
42533 diff -urNp linux-2.6.32.42/fs/proc/meminfo.c linux-2.6.32.42/fs/proc/meminfo.c
42534 --- linux-2.6.32.42/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
42535 +++ linux-2.6.32.42/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
42536 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
42537 unsigned long pages[NR_LRU_LISTS];
42538 int lru;
42539
42540 + pax_track_stack();
42541 +
42542 /*
42543 * display in kilobytes.
42544 */
42545 @@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
42546 vmi.used >> 10,
42547 vmi.largest_chunk >> 10
42548 #ifdef CONFIG_MEMORY_FAILURE
42549 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
42550 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
42551 #endif
42552 );
42553
42554 diff -urNp linux-2.6.32.42/fs/proc/nommu.c linux-2.6.32.42/fs/proc/nommu.c
42555 --- linux-2.6.32.42/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
42556 +++ linux-2.6.32.42/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
42557 @@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
42558 if (len < 1)
42559 len = 1;
42560 seq_printf(m, "%*c", len, ' ');
42561 - seq_path(m, &file->f_path, "");
42562 + seq_path(m, &file->f_path, "\n\\");
42563 }
42564
42565 seq_putc(m, '\n');
42566 diff -urNp linux-2.6.32.42/fs/proc/proc_net.c linux-2.6.32.42/fs/proc/proc_net.c
42567 --- linux-2.6.32.42/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
42568 +++ linux-2.6.32.42/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
42569 @@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
42570 struct task_struct *task;
42571 struct nsproxy *ns;
42572 struct net *net = NULL;
42573 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42574 + const struct cred *cred = current_cred();
42575 +#endif
42576 +
42577 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42578 + if (cred->fsuid)
42579 + return net;
42580 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42581 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
42582 + return net;
42583 +#endif
42584
42585 rcu_read_lock();
42586 task = pid_task(proc_pid(dir), PIDTYPE_PID);
42587 diff -urNp linux-2.6.32.42/fs/proc/proc_sysctl.c linux-2.6.32.42/fs/proc/proc_sysctl.c
42588 --- linux-2.6.32.42/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
42589 +++ linux-2.6.32.42/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
42590 @@ -7,6 +7,8 @@
42591 #include <linux/security.h>
42592 #include "internal.h"
42593
42594 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
42595 +
42596 static const struct dentry_operations proc_sys_dentry_operations;
42597 static const struct file_operations proc_sys_file_operations;
42598 static const struct inode_operations proc_sys_inode_operations;
42599 @@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
42600 if (!p)
42601 goto out;
42602
42603 + if (gr_handle_sysctl(p, MAY_EXEC))
42604 + goto out;
42605 +
42606 err = ERR_PTR(-ENOMEM);
42607 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
42608 if (h)
42609 @@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
42610 if (*pos < file->f_pos)
42611 continue;
42612
42613 + if (gr_handle_sysctl(table, 0))
42614 + continue;
42615 +
42616 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
42617 if (res)
42618 return res;
42619 @@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
42620 if (IS_ERR(head))
42621 return PTR_ERR(head);
42622
42623 + if (table && gr_handle_sysctl(table, MAY_EXEC))
42624 + return -ENOENT;
42625 +
42626 generic_fillattr(inode, stat);
42627 if (table)
42628 stat->mode = (stat->mode & S_IFMT) | table->mode;
42629 diff -urNp linux-2.6.32.42/fs/proc/root.c linux-2.6.32.42/fs/proc/root.c
42630 --- linux-2.6.32.42/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
42631 +++ linux-2.6.32.42/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
42632 @@ -134,7 +134,15 @@ void __init proc_root_init(void)
42633 #ifdef CONFIG_PROC_DEVICETREE
42634 proc_device_tree_init();
42635 #endif
42636 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
42637 +#ifdef CONFIG_GRKERNSEC_PROC_USER
42638 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
42639 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42640 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
42641 +#endif
42642 +#else
42643 proc_mkdir("bus", NULL);
42644 +#endif
42645 proc_sys_init();
42646 }
42647
42648 diff -urNp linux-2.6.32.42/fs/proc/task_mmu.c linux-2.6.32.42/fs/proc/task_mmu.c
42649 --- linux-2.6.32.42/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
42650 +++ linux-2.6.32.42/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
42651 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
42652 "VmStk:\t%8lu kB\n"
42653 "VmExe:\t%8lu kB\n"
42654 "VmLib:\t%8lu kB\n"
42655 - "VmPTE:\t%8lu kB\n",
42656 - hiwater_vm << (PAGE_SHIFT-10),
42657 + "VmPTE:\t%8lu kB\n"
42658 +
42659 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42660 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
42661 +#endif
42662 +
42663 + ,hiwater_vm << (PAGE_SHIFT-10),
42664 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
42665 mm->locked_vm << (PAGE_SHIFT-10),
42666 hiwater_rss << (PAGE_SHIFT-10),
42667 total_rss << (PAGE_SHIFT-10),
42668 data << (PAGE_SHIFT-10),
42669 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
42670 - (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
42671 + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
42672 +
42673 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
42674 + , mm->context.user_cs_base, mm->context.user_cs_limit
42675 +#endif
42676 +
42677 + );
42678 }
42679
42680 unsigned long task_vsize(struct mm_struct *mm)
42681 @@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
42682 struct proc_maps_private *priv = m->private;
42683 struct vm_area_struct *vma = v;
42684
42685 - vma_stop(priv, vma);
42686 + if (!IS_ERR(vma))
42687 + vma_stop(priv, vma);
42688 if (priv->task)
42689 put_task_struct(priv->task);
42690 }
42691 @@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
42692 return ret;
42693 }
42694
42695 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42696 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42697 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
42698 + _mm->pax_flags & MF_PAX_SEGMEXEC))
42699 +#endif
42700 +
42701 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
42702 {
42703 struct mm_struct *mm = vma->vm_mm;
42704 @@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
42705 int flags = vma->vm_flags;
42706 unsigned long ino = 0;
42707 unsigned long long pgoff = 0;
42708 - unsigned long start;
42709 dev_t dev = 0;
42710 int len;
42711
42712 @@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
42713 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
42714 }
42715
42716 - /* We don't show the stack guard page in /proc/maps */
42717 - start = vma->vm_start;
42718 - if (vma->vm_flags & VM_GROWSDOWN)
42719 - if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
42720 - start += PAGE_SIZE;
42721 -
42722 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
42723 - start,
42724 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42725 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
42726 + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
42727 +#else
42728 + vma->vm_start,
42729 vma->vm_end,
42730 +#endif
42731 flags & VM_READ ? 'r' : '-',
42732 flags & VM_WRITE ? 'w' : '-',
42733 flags & VM_EXEC ? 'x' : '-',
42734 flags & VM_MAYSHARE ? 's' : 'p',
42735 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42736 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
42737 +#else
42738 pgoff,
42739 +#endif
42740 MAJOR(dev), MINOR(dev), ino, &len);
42741
42742 /*
42743 @@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
42744 */
42745 if (file) {
42746 pad_len_spaces(m, len);
42747 - seq_path(m, &file->f_path, "\n");
42748 + seq_path(m, &file->f_path, "\n\\");
42749 } else {
42750 const char *name = arch_vma_name(vma);
42751 if (!name) {
42752 @@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
42753 if (vma->vm_start <= mm->brk &&
42754 vma->vm_end >= mm->start_brk) {
42755 name = "[heap]";
42756 - } else if (vma->vm_start <= mm->start_stack &&
42757 - vma->vm_end >= mm->start_stack) {
42758 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
42759 + (vma->vm_start <= mm->start_stack &&
42760 + vma->vm_end >= mm->start_stack)) {
42761 name = "[stack]";
42762 }
42763 } else {
42764 @@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
42765 };
42766
42767 memset(&mss, 0, sizeof mss);
42768 - mss.vma = vma;
42769 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
42770 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
42771 +
42772 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42773 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
42774 +#endif
42775 + mss.vma = vma;
42776 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
42777 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
42778 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42779 + }
42780 +#endif
42781
42782 show_map_vma(m, vma);
42783
42784 @@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
42785 "Swap: %8lu kB\n"
42786 "KernelPageSize: %8lu kB\n"
42787 "MMUPageSize: %8lu kB\n",
42788 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42789 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
42790 +#else
42791 (vma->vm_end - vma->vm_start) >> 10,
42792 +#endif
42793 mss.resident >> 10,
42794 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
42795 mss.shared_clean >> 10,
42796 diff -urNp linux-2.6.32.42/fs/proc/task_nommu.c linux-2.6.32.42/fs/proc/task_nommu.c
42797 --- linux-2.6.32.42/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
42798 +++ linux-2.6.32.42/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
42799 @@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
42800 else
42801 bytes += kobjsize(mm);
42802
42803 - if (current->fs && current->fs->users > 1)
42804 + if (current->fs && atomic_read(&current->fs->users) > 1)
42805 sbytes += kobjsize(current->fs);
42806 else
42807 bytes += kobjsize(current->fs);
42808 @@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
42809 if (len < 1)
42810 len = 1;
42811 seq_printf(m, "%*c", len, ' ');
42812 - seq_path(m, &file->f_path, "");
42813 + seq_path(m, &file->f_path, "\n\\");
42814 }
42815
42816 seq_putc(m, '\n');
42817 diff -urNp linux-2.6.32.42/fs/readdir.c linux-2.6.32.42/fs/readdir.c
42818 --- linux-2.6.32.42/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
42819 +++ linux-2.6.32.42/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
42820 @@ -16,6 +16,7 @@
42821 #include <linux/security.h>
42822 #include <linux/syscalls.h>
42823 #include <linux/unistd.h>
42824 +#include <linux/namei.h>
42825
42826 #include <asm/uaccess.h>
42827
42828 @@ -67,6 +68,7 @@ struct old_linux_dirent {
42829
42830 struct readdir_callback {
42831 struct old_linux_dirent __user * dirent;
42832 + struct file * file;
42833 int result;
42834 };
42835
42836 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
42837 buf->result = -EOVERFLOW;
42838 return -EOVERFLOW;
42839 }
42840 +
42841 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42842 + return 0;
42843 +
42844 buf->result++;
42845 dirent = buf->dirent;
42846 if (!access_ok(VERIFY_WRITE, dirent,
42847 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
42848
42849 buf.result = 0;
42850 buf.dirent = dirent;
42851 + buf.file = file;
42852
42853 error = vfs_readdir(file, fillonedir, &buf);
42854 if (buf.result)
42855 @@ -142,6 +149,7 @@ struct linux_dirent {
42856 struct getdents_callback {
42857 struct linux_dirent __user * current_dir;
42858 struct linux_dirent __user * previous;
42859 + struct file * file;
42860 int count;
42861 int error;
42862 };
42863 @@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
42864 buf->error = -EOVERFLOW;
42865 return -EOVERFLOW;
42866 }
42867 +
42868 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42869 + return 0;
42870 +
42871 dirent = buf->previous;
42872 if (dirent) {
42873 if (__put_user(offset, &dirent->d_off))
42874 @@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
42875 buf.previous = NULL;
42876 buf.count = count;
42877 buf.error = 0;
42878 + buf.file = file;
42879
42880 error = vfs_readdir(file, filldir, &buf);
42881 if (error >= 0)
42882 @@ -228,6 +241,7 @@ out:
42883 struct getdents_callback64 {
42884 struct linux_dirent64 __user * current_dir;
42885 struct linux_dirent64 __user * previous;
42886 + struct file *file;
42887 int count;
42888 int error;
42889 };
42890 @@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
42891 buf->error = -EINVAL; /* only used if we fail.. */
42892 if (reclen > buf->count)
42893 return -EINVAL;
42894 +
42895 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
42896 + return 0;
42897 +
42898 dirent = buf->previous;
42899 if (dirent) {
42900 if (__put_user(offset, &dirent->d_off))
42901 @@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
42902
42903 buf.current_dir = dirent;
42904 buf.previous = NULL;
42905 + buf.file = file;
42906 buf.count = count;
42907 buf.error = 0;
42908
42909 diff -urNp linux-2.6.32.42/fs/reiserfs/dir.c linux-2.6.32.42/fs/reiserfs/dir.c
42910 --- linux-2.6.32.42/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
42911 +++ linux-2.6.32.42/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
42912 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
42913 struct reiserfs_dir_entry de;
42914 int ret = 0;
42915
42916 + pax_track_stack();
42917 +
42918 reiserfs_write_lock(inode->i_sb);
42919
42920 reiserfs_check_lock_depth(inode->i_sb, "readdir");
42921 diff -urNp linux-2.6.32.42/fs/reiserfs/do_balan.c linux-2.6.32.42/fs/reiserfs/do_balan.c
42922 --- linux-2.6.32.42/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
42923 +++ linux-2.6.32.42/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
42924 @@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
42925 return;
42926 }
42927
42928 - atomic_inc(&(fs_generation(tb->tb_sb)));
42929 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
42930 do_balance_starts(tb);
42931
42932 /* balance leaf returns 0 except if combining L R and S into
42933 diff -urNp linux-2.6.32.42/fs/reiserfs/item_ops.c linux-2.6.32.42/fs/reiserfs/item_ops.c
42934 --- linux-2.6.32.42/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
42935 +++ linux-2.6.32.42/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
42936 @@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
42937 vi->vi_index, vi->vi_type, vi->vi_ih);
42938 }
42939
42940 -static struct item_operations stat_data_ops = {
42941 +static const struct item_operations stat_data_ops = {
42942 .bytes_number = sd_bytes_number,
42943 .decrement_key = sd_decrement_key,
42944 .is_left_mergeable = sd_is_left_mergeable,
42945 @@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
42946 vi->vi_index, vi->vi_type, vi->vi_ih);
42947 }
42948
42949 -static struct item_operations direct_ops = {
42950 +static const struct item_operations direct_ops = {
42951 .bytes_number = direct_bytes_number,
42952 .decrement_key = direct_decrement_key,
42953 .is_left_mergeable = direct_is_left_mergeable,
42954 @@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
42955 vi->vi_index, vi->vi_type, vi->vi_ih);
42956 }
42957
42958 -static struct item_operations indirect_ops = {
42959 +static const struct item_operations indirect_ops = {
42960 .bytes_number = indirect_bytes_number,
42961 .decrement_key = indirect_decrement_key,
42962 .is_left_mergeable = indirect_is_left_mergeable,
42963 @@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
42964 printk("\n");
42965 }
42966
42967 -static struct item_operations direntry_ops = {
42968 +static const struct item_operations direntry_ops = {
42969 .bytes_number = direntry_bytes_number,
42970 .decrement_key = direntry_decrement_key,
42971 .is_left_mergeable = direntry_is_left_mergeable,
42972 @@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
42973 "Invalid item type observed, run fsck ASAP");
42974 }
42975
42976 -static struct item_operations errcatch_ops = {
42977 +static const struct item_operations errcatch_ops = {
42978 errcatch_bytes_number,
42979 errcatch_decrement_key,
42980 errcatch_is_left_mergeable,
42981 @@ -746,7 +746,7 @@ static struct item_operations errcatch_o
42982 #error Item types must use disk-format assigned values.
42983 #endif
42984
42985 -struct item_operations *item_ops[TYPE_ANY + 1] = {
42986 +const struct item_operations * const item_ops[TYPE_ANY + 1] = {
42987 &stat_data_ops,
42988 &indirect_ops,
42989 &direct_ops,
42990 diff -urNp linux-2.6.32.42/fs/reiserfs/journal.c linux-2.6.32.42/fs/reiserfs/journal.c
42991 --- linux-2.6.32.42/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
42992 +++ linux-2.6.32.42/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
42993 @@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
42994 struct buffer_head *bh;
42995 int i, j;
42996
42997 + pax_track_stack();
42998 +
42999 bh = __getblk(dev, block, bufsize);
43000 if (buffer_uptodate(bh))
43001 return (bh);
43002 diff -urNp linux-2.6.32.42/fs/reiserfs/namei.c linux-2.6.32.42/fs/reiserfs/namei.c
43003 --- linux-2.6.32.42/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
43004 +++ linux-2.6.32.42/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
43005 @@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
43006 unsigned long savelink = 1;
43007 struct timespec ctime;
43008
43009 + pax_track_stack();
43010 +
43011 /* three balancings: (1) old name removal, (2) new name insertion
43012 and (3) maybe "save" link insertion
43013 stat data updates: (1) old directory,
43014 diff -urNp linux-2.6.32.42/fs/reiserfs/procfs.c linux-2.6.32.42/fs/reiserfs/procfs.c
43015 --- linux-2.6.32.42/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
43016 +++ linux-2.6.32.42/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
43017 @@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
43018 "SMALL_TAILS " : "NO_TAILS ",
43019 replay_only(sb) ? "REPLAY_ONLY " : "",
43020 convert_reiserfs(sb) ? "CONV " : "",
43021 - atomic_read(&r->s_generation_counter),
43022 + atomic_read_unchecked(&r->s_generation_counter),
43023 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
43024 SF(s_do_balance), SF(s_unneeded_left_neighbor),
43025 SF(s_good_search_by_key_reada), SF(s_bmaps),
43026 @@ -309,6 +309,8 @@ static int show_journal(struct seq_file
43027 struct journal_params *jp = &rs->s_v1.s_journal;
43028 char b[BDEVNAME_SIZE];
43029
43030 + pax_track_stack();
43031 +
43032 seq_printf(m, /* on-disk fields */
43033 "jp_journal_1st_block: \t%i\n"
43034 "jp_journal_dev: \t%s[%x]\n"
43035 diff -urNp linux-2.6.32.42/fs/reiserfs/stree.c linux-2.6.32.42/fs/reiserfs/stree.c
43036 --- linux-2.6.32.42/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
43037 +++ linux-2.6.32.42/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
43038 @@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
43039 int iter = 0;
43040 #endif
43041
43042 + pax_track_stack();
43043 +
43044 BUG_ON(!th->t_trans_id);
43045
43046 init_tb_struct(th, &s_del_balance, sb, path,
43047 @@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
43048 int retval;
43049 int quota_cut_bytes = 0;
43050
43051 + pax_track_stack();
43052 +
43053 BUG_ON(!th->t_trans_id);
43054
43055 le_key2cpu_key(&cpu_key, key);
43056 @@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
43057 int quota_cut_bytes;
43058 loff_t tail_pos = 0;
43059
43060 + pax_track_stack();
43061 +
43062 BUG_ON(!th->t_trans_id);
43063
43064 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
43065 @@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
43066 int retval;
43067 int fs_gen;
43068
43069 + pax_track_stack();
43070 +
43071 BUG_ON(!th->t_trans_id);
43072
43073 fs_gen = get_generation(inode->i_sb);
43074 @@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
43075 int fs_gen = 0;
43076 int quota_bytes = 0;
43077
43078 + pax_track_stack();
43079 +
43080 BUG_ON(!th->t_trans_id);
43081
43082 if (inode) { /* Do we count quotas for item? */
43083 diff -urNp linux-2.6.32.42/fs/reiserfs/super.c linux-2.6.32.42/fs/reiserfs/super.c
43084 --- linux-2.6.32.42/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
43085 +++ linux-2.6.32.42/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
43086 @@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
43087 {.option_name = NULL}
43088 };
43089
43090 + pax_track_stack();
43091 +
43092 *blocks = 0;
43093 if (!options || !*options)
43094 /* use default configuration: create tails, journaling on, no
43095 diff -urNp linux-2.6.32.42/fs/select.c linux-2.6.32.42/fs/select.c
43096 --- linux-2.6.32.42/fs/select.c 2011-03-27 14:31:47.000000000 -0400
43097 +++ linux-2.6.32.42/fs/select.c 2011-05-16 21:46:57.000000000 -0400
43098 @@ -20,6 +20,7 @@
43099 #include <linux/module.h>
43100 #include <linux/slab.h>
43101 #include <linux/poll.h>
43102 +#include <linux/security.h>
43103 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
43104 #include <linux/file.h>
43105 #include <linux/fdtable.h>
43106 @@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
43107 int retval, i, timed_out = 0;
43108 unsigned long slack = 0;
43109
43110 + pax_track_stack();
43111 +
43112 rcu_read_lock();
43113 retval = max_select_fd(n, fds);
43114 rcu_read_unlock();
43115 @@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
43116 /* Allocate small arguments on the stack to save memory and be faster */
43117 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
43118
43119 + pax_track_stack();
43120 +
43121 ret = -EINVAL;
43122 if (n < 0)
43123 goto out_nofds;
43124 @@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
43125 struct poll_list *walk = head;
43126 unsigned long todo = nfds;
43127
43128 + pax_track_stack();
43129 +
43130 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
43131 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
43132 return -EINVAL;
43133
43134 diff -urNp linux-2.6.32.42/fs/seq_file.c linux-2.6.32.42/fs/seq_file.c
43135 --- linux-2.6.32.42/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
43136 +++ linux-2.6.32.42/fs/seq_file.c 2011-04-17 15:56:46.000000000 -0400
43137 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
43138 return 0;
43139 }
43140 if (!m->buf) {
43141 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43142 + m->size = PAGE_SIZE;
43143 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43144 if (!m->buf)
43145 return -ENOMEM;
43146 }
43147 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
43148 Eoverflow:
43149 m->op->stop(m, p);
43150 kfree(m->buf);
43151 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43152 + m->size <<= 1;
43153 + m->buf = kmalloc(m->size, GFP_KERNEL);
43154 return !m->buf ? -ENOMEM : -EAGAIN;
43155 }
43156
43157 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
43158 m->version = file->f_version;
43159 /* grab buffer if we didn't have one */
43160 if (!m->buf) {
43161 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43162 + m->size = PAGE_SIZE;
43163 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43164 if (!m->buf)
43165 goto Enomem;
43166 }
43167 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
43168 goto Fill;
43169 m->op->stop(m, p);
43170 kfree(m->buf);
43171 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43172 + m->size <<= 1;
43173 + m->buf = kmalloc(m->size, GFP_KERNEL);
43174 if (!m->buf)
43175 goto Enomem;
43176 m->count = 0;
43177 diff -urNp linux-2.6.32.42/fs/smbfs/symlink.c linux-2.6.32.42/fs/smbfs/symlink.c
43178 --- linux-2.6.32.42/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
43179 +++ linux-2.6.32.42/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
43180 @@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
43181
43182 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43183 {
43184 - char *s = nd_get_link(nd);
43185 + const char *s = nd_get_link(nd);
43186 if (!IS_ERR(s))
43187 __putname(s);
43188 }
43189 diff -urNp linux-2.6.32.42/fs/splice.c linux-2.6.32.42/fs/splice.c
43190 --- linux-2.6.32.42/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
43191 +++ linux-2.6.32.42/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
43192 @@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
43193 pipe_lock(pipe);
43194
43195 for (;;) {
43196 - if (!pipe->readers) {
43197 + if (!atomic_read(&pipe->readers)) {
43198 send_sig(SIGPIPE, current, 0);
43199 if (!ret)
43200 ret = -EPIPE;
43201 @@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
43202 do_wakeup = 0;
43203 }
43204
43205 - pipe->waiting_writers++;
43206 + atomic_inc(&pipe->waiting_writers);
43207 pipe_wait(pipe);
43208 - pipe->waiting_writers--;
43209 + atomic_dec(&pipe->waiting_writers);
43210 }
43211
43212 pipe_unlock(pipe);
43213 @@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
43214 .spd_release = spd_release_page,
43215 };
43216
43217 + pax_track_stack();
43218 +
43219 index = *ppos >> PAGE_CACHE_SHIFT;
43220 loff = *ppos & ~PAGE_CACHE_MASK;
43221 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43222 @@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
43223 old_fs = get_fs();
43224 set_fs(get_ds());
43225 /* The cast to a user pointer is valid due to the set_fs() */
43226 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
43227 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
43228 set_fs(old_fs);
43229
43230 return res;
43231 @@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
43232 old_fs = get_fs();
43233 set_fs(get_ds());
43234 /* The cast to a user pointer is valid due to the set_fs() */
43235 - res = vfs_write(file, (const char __user *)buf, count, &pos);
43236 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
43237 set_fs(old_fs);
43238
43239 return res;
43240 @@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
43241 .spd_release = spd_release_page,
43242 };
43243
43244 + pax_track_stack();
43245 +
43246 index = *ppos >> PAGE_CACHE_SHIFT;
43247 offset = *ppos & ~PAGE_CACHE_MASK;
43248 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43249 @@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
43250 goto err;
43251
43252 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
43253 - vec[i].iov_base = (void __user *) page_address(page);
43254 + vec[i].iov_base = (__force void __user *) page_address(page);
43255 vec[i].iov_len = this_len;
43256 pages[i] = page;
43257 spd.nr_pages++;
43258 @@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
43259 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
43260 {
43261 while (!pipe->nrbufs) {
43262 - if (!pipe->writers)
43263 + if (!atomic_read(&pipe->writers))
43264 return 0;
43265
43266 - if (!pipe->waiting_writers && sd->num_spliced)
43267 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
43268 return 0;
43269
43270 if (sd->flags & SPLICE_F_NONBLOCK)
43271 @@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
43272 * out of the pipe right after the splice_to_pipe(). So set
43273 * PIPE_READERS appropriately.
43274 */
43275 - pipe->readers = 1;
43276 + atomic_set(&pipe->readers, 1);
43277
43278 current->splice_pipe = pipe;
43279 }
43280 @@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
43281 .spd_release = spd_release_page,
43282 };
43283
43284 + pax_track_stack();
43285 +
43286 pipe = pipe_info(file->f_path.dentry->d_inode);
43287 if (!pipe)
43288 return -EBADF;
43289 @@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
43290 ret = -ERESTARTSYS;
43291 break;
43292 }
43293 - if (!pipe->writers)
43294 + if (!atomic_read(&pipe->writers))
43295 break;
43296 - if (!pipe->waiting_writers) {
43297 + if (!atomic_read(&pipe->waiting_writers)) {
43298 if (flags & SPLICE_F_NONBLOCK) {
43299 ret = -EAGAIN;
43300 break;
43301 @@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
43302 pipe_lock(pipe);
43303
43304 while (pipe->nrbufs >= PIPE_BUFFERS) {
43305 - if (!pipe->readers) {
43306 + if (!atomic_read(&pipe->readers)) {
43307 send_sig(SIGPIPE, current, 0);
43308 ret = -EPIPE;
43309 break;
43310 @@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
43311 ret = -ERESTARTSYS;
43312 break;
43313 }
43314 - pipe->waiting_writers++;
43315 + atomic_inc(&pipe->waiting_writers);
43316 pipe_wait(pipe);
43317 - pipe->waiting_writers--;
43318 + atomic_dec(&pipe->waiting_writers);
43319 }
43320
43321 pipe_unlock(pipe);
43322 @@ -1785,14 +1791,14 @@ retry:
43323 pipe_double_lock(ipipe, opipe);
43324
43325 do {
43326 - if (!opipe->readers) {
43327 + if (!atomic_read(&opipe->readers)) {
43328 send_sig(SIGPIPE, current, 0);
43329 if (!ret)
43330 ret = -EPIPE;
43331 break;
43332 }
43333
43334 - if (!ipipe->nrbufs && !ipipe->writers)
43335 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
43336 break;
43337
43338 /*
43339 @@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
43340 pipe_double_lock(ipipe, opipe);
43341
43342 do {
43343 - if (!opipe->readers) {
43344 + if (!atomic_read(&opipe->readers)) {
43345 send_sig(SIGPIPE, current, 0);
43346 if (!ret)
43347 ret = -EPIPE;
43348 @@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
43349 * return EAGAIN if we have the potential of some data in the
43350 * future, otherwise just return 0
43351 */
43352 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
43353 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
43354 ret = -EAGAIN;
43355
43356 pipe_unlock(ipipe);
43357 diff -urNp linux-2.6.32.42/fs/sysfs/file.c linux-2.6.32.42/fs/sysfs/file.c
43358 --- linux-2.6.32.42/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
43359 +++ linux-2.6.32.42/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
43360 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
43361
43362 struct sysfs_open_dirent {
43363 atomic_t refcnt;
43364 - atomic_t event;
43365 + atomic_unchecked_t event;
43366 wait_queue_head_t poll;
43367 struct list_head buffers; /* goes through sysfs_buffer.list */
43368 };
43369 @@ -53,7 +53,7 @@ struct sysfs_buffer {
43370 size_t count;
43371 loff_t pos;
43372 char * page;
43373 - struct sysfs_ops * ops;
43374 + const struct sysfs_ops * ops;
43375 struct mutex mutex;
43376 int needs_read_fill;
43377 int event;
43378 @@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
43379 {
43380 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
43381 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43382 - struct sysfs_ops * ops = buffer->ops;
43383 + const struct sysfs_ops * ops = buffer->ops;
43384 int ret = 0;
43385 ssize_t count;
43386
43387 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
43388 if (!sysfs_get_active_two(attr_sd))
43389 return -ENODEV;
43390
43391 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
43392 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
43393 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
43394
43395 sysfs_put_active_two(attr_sd);
43396 @@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
43397 {
43398 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
43399 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43400 - struct sysfs_ops * ops = buffer->ops;
43401 + const struct sysfs_ops * ops = buffer->ops;
43402 int rc;
43403
43404 /* need attr_sd for attr and ops, its parent for kobj */
43405 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
43406 return -ENOMEM;
43407
43408 atomic_set(&new_od->refcnt, 0);
43409 - atomic_set(&new_od->event, 1);
43410 + atomic_set_unchecked(&new_od->event, 1);
43411 init_waitqueue_head(&new_od->poll);
43412 INIT_LIST_HEAD(&new_od->buffers);
43413 goto retry;
43414 @@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
43415 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
43416 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
43417 struct sysfs_buffer *buffer;
43418 - struct sysfs_ops *ops;
43419 + const struct sysfs_ops *ops;
43420 int error = -EACCES;
43421 char *p;
43422
43423 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
43424
43425 sysfs_put_active_two(attr_sd);
43426
43427 - if (buffer->event != atomic_read(&od->event))
43428 + if (buffer->event != atomic_read_unchecked(&od->event))
43429 goto trigger;
43430
43431 return DEFAULT_POLLMASK;
43432 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
43433
43434 od = sd->s_attr.open;
43435 if (od) {
43436 - atomic_inc(&od->event);
43437 + atomic_inc_unchecked(&od->event);
43438 wake_up_interruptible(&od->poll);
43439 }
43440
43441 diff -urNp linux-2.6.32.42/fs/sysfs/mount.c linux-2.6.32.42/fs/sysfs/mount.c
43442 --- linux-2.6.32.42/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
43443 +++ linux-2.6.32.42/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
43444 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
43445 .s_name = "",
43446 .s_count = ATOMIC_INIT(1),
43447 .s_flags = SYSFS_DIR,
43448 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
43449 + .s_mode = S_IFDIR | S_IRWXU,
43450 +#else
43451 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
43452 +#endif
43453 .s_ino = 1,
43454 };
43455
43456 diff -urNp linux-2.6.32.42/fs/sysfs/symlink.c linux-2.6.32.42/fs/sysfs/symlink.c
43457 --- linux-2.6.32.42/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
43458 +++ linux-2.6.32.42/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
43459 @@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
43460
43461 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
43462 {
43463 - char *page = nd_get_link(nd);
43464 + const char *page = nd_get_link(nd);
43465 if (!IS_ERR(page))
43466 free_page((unsigned long)page);
43467 }
43468 diff -urNp linux-2.6.32.42/fs/udf/balloc.c linux-2.6.32.42/fs/udf/balloc.c
43469 --- linux-2.6.32.42/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
43470 +++ linux-2.6.32.42/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
43471 @@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
43472
43473 mutex_lock(&sbi->s_alloc_mutex);
43474 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
43475 - if (bloc->logicalBlockNum < 0 ||
43476 - (bloc->logicalBlockNum + count) >
43477 - partmap->s_partition_len) {
43478 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
43479 udf_debug("%d < %d || %d + %d > %d\n",
43480 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
43481 count, partmap->s_partition_len);
43482 @@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
43483
43484 mutex_lock(&sbi->s_alloc_mutex);
43485 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
43486 - if (bloc->logicalBlockNum < 0 ||
43487 - (bloc->logicalBlockNum + count) >
43488 - partmap->s_partition_len) {
43489 + if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
43490 udf_debug("%d < %d || %d + %d > %d\n",
43491 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
43492 partmap->s_partition_len);
43493 diff -urNp linux-2.6.32.42/fs/udf/inode.c linux-2.6.32.42/fs/udf/inode.c
43494 --- linux-2.6.32.42/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
43495 +++ linux-2.6.32.42/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
43496 @@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
43497 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
43498 int lastblock = 0;
43499
43500 + pax_track_stack();
43501 +
43502 prev_epos.offset = udf_file_entry_alloc_offset(inode);
43503 prev_epos.block = iinfo->i_location;
43504 prev_epos.bh = NULL;
43505 diff -urNp linux-2.6.32.42/fs/udf/misc.c linux-2.6.32.42/fs/udf/misc.c
43506 --- linux-2.6.32.42/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
43507 +++ linux-2.6.32.42/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
43508 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
43509
43510 u8 udf_tag_checksum(const struct tag *t)
43511 {
43512 - u8 *data = (u8 *)t;
43513 + const u8 *data = (const u8 *)t;
43514 u8 checksum = 0;
43515 int i;
43516 for (i = 0; i < sizeof(struct tag); ++i)
43517 diff -urNp linux-2.6.32.42/fs/utimes.c linux-2.6.32.42/fs/utimes.c
43518 --- linux-2.6.32.42/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
43519 +++ linux-2.6.32.42/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
43520 @@ -1,6 +1,7 @@
43521 #include <linux/compiler.h>
43522 #include <linux/file.h>
43523 #include <linux/fs.h>
43524 +#include <linux/security.h>
43525 #include <linux/linkage.h>
43526 #include <linux/mount.h>
43527 #include <linux/namei.h>
43528 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
43529 goto mnt_drop_write_and_out;
43530 }
43531 }
43532 +
43533 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
43534 + error = -EACCES;
43535 + goto mnt_drop_write_and_out;
43536 + }
43537 +
43538 mutex_lock(&inode->i_mutex);
43539 error = notify_change(path->dentry, &newattrs);
43540 mutex_unlock(&inode->i_mutex);
43541 diff -urNp linux-2.6.32.42/fs/xattr_acl.c linux-2.6.32.42/fs/xattr_acl.c
43542 --- linux-2.6.32.42/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
43543 +++ linux-2.6.32.42/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
43544 @@ -17,8 +17,8 @@
43545 struct posix_acl *
43546 posix_acl_from_xattr(const void *value, size_t size)
43547 {
43548 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
43549 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
43550 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
43551 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
43552 int count;
43553 struct posix_acl *acl;
43554 struct posix_acl_entry *acl_e;
43555 diff -urNp linux-2.6.32.42/fs/xattr.c linux-2.6.32.42/fs/xattr.c
43556 --- linux-2.6.32.42/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
43557 +++ linux-2.6.32.42/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
43558 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
43559 * Extended attribute SET operations
43560 */
43561 static long
43562 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
43563 +setxattr(struct path *path, const char __user *name, const void __user *value,
43564 size_t size, int flags)
43565 {
43566 int error;
43567 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
43568 return PTR_ERR(kvalue);
43569 }
43570
43571 - error = vfs_setxattr(d, kname, kvalue, size, flags);
43572 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
43573 + error = -EACCES;
43574 + goto out;
43575 + }
43576 +
43577 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
43578 +out:
43579 kfree(kvalue);
43580 return error;
43581 }
43582 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
43583 return error;
43584 error = mnt_want_write(path.mnt);
43585 if (!error) {
43586 - error = setxattr(path.dentry, name, value, size, flags);
43587 + error = setxattr(&path, name, value, size, flags);
43588 mnt_drop_write(path.mnt);
43589 }
43590 path_put(&path);
43591 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
43592 return error;
43593 error = mnt_want_write(path.mnt);
43594 if (!error) {
43595 - error = setxattr(path.dentry, name, value, size, flags);
43596 + error = setxattr(&path, name, value, size, flags);
43597 mnt_drop_write(path.mnt);
43598 }
43599 path_put(&path);
43600 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
43601 const void __user *,value, size_t, size, int, flags)
43602 {
43603 struct file *f;
43604 - struct dentry *dentry;
43605 int error = -EBADF;
43606
43607 f = fget(fd);
43608 if (!f)
43609 return error;
43610 - dentry = f->f_path.dentry;
43611 - audit_inode(NULL, dentry);
43612 + audit_inode(NULL, f->f_path.dentry);
43613 error = mnt_want_write_file(f);
43614 if (!error) {
43615 - error = setxattr(dentry, name, value, size, flags);
43616 + error = setxattr(&f->f_path, name, value, size, flags);
43617 mnt_drop_write(f->f_path.mnt);
43618 }
43619 fput(f);
43620 diff -urNp linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c
43621 --- linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
43622 +++ linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
43623 @@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
43624 xfs_fsop_geom_t fsgeo;
43625 int error;
43626
43627 + memset(&fsgeo, 0, sizeof(fsgeo));
43628 error = xfs_fs_geometry(mp, &fsgeo, 3);
43629 if (error)
43630 return -error;
43631 diff -urNp linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c
43632 --- linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
43633 +++ linux-2.6.32.42/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
43634 @@ -134,7 +134,7 @@ xfs_find_handle(
43635 }
43636
43637 error = -EFAULT;
43638 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
43639 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
43640 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
43641 goto out_put;
43642
43643 @@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
43644 if (IS_ERR(dentry))
43645 return PTR_ERR(dentry);
43646
43647 - kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
43648 + kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
43649 if (!kbuf)
43650 goto out_dput;
43651
43652 @@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
43653 xfs_mount_t *mp,
43654 void __user *arg)
43655 {
43656 - xfs_fsop_geom_t fsgeo;
43657 + xfs_fsop_geom_t fsgeo;
43658 int error;
43659
43660 error = xfs_fs_geometry(mp, &fsgeo, 3);
43661 diff -urNp linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c
43662 --- linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
43663 +++ linux-2.6.32.42/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
43664 @@ -468,7 +468,7 @@ xfs_vn_put_link(
43665 struct nameidata *nd,
43666 void *p)
43667 {
43668 - char *s = nd_get_link(nd);
43669 + const char *s = nd_get_link(nd);
43670
43671 if (!IS_ERR(s))
43672 kfree(s);
43673 diff -urNp linux-2.6.32.42/fs/xfs/xfs_bmap.c linux-2.6.32.42/fs/xfs/xfs_bmap.c
43674 --- linux-2.6.32.42/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
43675 +++ linux-2.6.32.42/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
43676 @@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
43677 int nmap,
43678 int ret_nmap);
43679 #else
43680 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
43681 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
43682 #endif /* DEBUG */
43683
43684 #if defined(XFS_RW_TRACE)
43685 diff -urNp linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c
43686 --- linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
43687 +++ linux-2.6.32.42/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
43688 @@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
43689 }
43690
43691 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
43692 - if (filldir(dirent, sfep->name, sfep->namelen,
43693 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
43694 + char name[sfep->namelen];
43695 + memcpy(name, sfep->name, sfep->namelen);
43696 + if (filldir(dirent, name, sfep->namelen,
43697 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
43698 + *offset = off & 0x7fffffff;
43699 + return 0;
43700 + }
43701 + } else if (filldir(dirent, sfep->name, sfep->namelen,
43702 off & 0x7fffffff, ino, DT_UNKNOWN)) {
43703 *offset = off & 0x7fffffff;
43704 return 0;
43705 diff -urNp linux-2.6.32.42/grsecurity/gracl_alloc.c linux-2.6.32.42/grsecurity/gracl_alloc.c
43706 --- linux-2.6.32.42/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
43707 +++ linux-2.6.32.42/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
43708 @@ -0,0 +1,105 @@
43709 +#include <linux/kernel.h>
43710 +#include <linux/mm.h>
43711 +#include <linux/slab.h>
43712 +#include <linux/vmalloc.h>
43713 +#include <linux/gracl.h>
43714 +#include <linux/grsecurity.h>
43715 +
43716 +static unsigned long alloc_stack_next = 1;
43717 +static unsigned long alloc_stack_size = 1;
43718 +static void **alloc_stack;
43719 +
43720 +static __inline__ int
43721 +alloc_pop(void)
43722 +{
43723 + if (alloc_stack_next == 1)
43724 + return 0;
43725 +
43726 + kfree(alloc_stack[alloc_stack_next - 2]);
43727 +
43728 + alloc_stack_next--;
43729 +
43730 + return 1;
43731 +}
43732 +
43733 +static __inline__ int
43734 +alloc_push(void *buf)
43735 +{
43736 + if (alloc_stack_next >= alloc_stack_size)
43737 + return 1;
43738 +
43739 + alloc_stack[alloc_stack_next - 1] = buf;
43740 +
43741 + alloc_stack_next++;
43742 +
43743 + return 0;
43744 +}
43745 +
43746 +void *
43747 +acl_alloc(unsigned long len)
43748 +{
43749 + void *ret = NULL;
43750 +
43751 + if (!len || len > PAGE_SIZE)
43752 + goto out;
43753 +
43754 + ret = kmalloc(len, GFP_KERNEL);
43755 +
43756 + if (ret) {
43757 + if (alloc_push(ret)) {
43758 + kfree(ret);
43759 + ret = NULL;
43760 + }
43761 + }
43762 +
43763 +out:
43764 + return ret;
43765 +}
43766 +
43767 +void *
43768 +acl_alloc_num(unsigned long num, unsigned long len)
43769 +{
43770 + if (!len || (num > (PAGE_SIZE / len)))
43771 + return NULL;
43772 +
43773 + return acl_alloc(num * len);
43774 +}
43775 +
43776 +void
43777 +acl_free_all(void)
43778 +{
43779 + if (gr_acl_is_enabled() || !alloc_stack)
43780 + return;
43781 +
43782 + while (alloc_pop()) ;
43783 +
43784 + if (alloc_stack) {
43785 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
43786 + kfree(alloc_stack);
43787 + else
43788 + vfree(alloc_stack);
43789 + }
43790 +
43791 + alloc_stack = NULL;
43792 + alloc_stack_size = 1;
43793 + alloc_stack_next = 1;
43794 +
43795 + return;
43796 +}
43797 +
43798 +int
43799 +acl_alloc_stack_init(unsigned long size)
43800 +{
43801 + if ((size * sizeof (void *)) <= PAGE_SIZE)
43802 + alloc_stack =
43803 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
43804 + else
43805 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
43806 +
43807 + alloc_stack_size = size;
43808 +
43809 + if (!alloc_stack)
43810 + return 0;
43811 + else
43812 + return 1;
43813 +}
43814 diff -urNp linux-2.6.32.42/grsecurity/gracl.c linux-2.6.32.42/grsecurity/gracl.c
43815 --- linux-2.6.32.42/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
43816 +++ linux-2.6.32.42/grsecurity/gracl.c 2011-06-11 16:24:26.000000000 -0400
43817 @@ -0,0 +1,4085 @@
43818 +#include <linux/kernel.h>
43819 +#include <linux/module.h>
43820 +#include <linux/sched.h>
43821 +#include <linux/mm.h>
43822 +#include <linux/file.h>
43823 +#include <linux/fs.h>
43824 +#include <linux/namei.h>
43825 +#include <linux/mount.h>
43826 +#include <linux/tty.h>
43827 +#include <linux/proc_fs.h>
43828 +#include <linux/smp_lock.h>
43829 +#include <linux/slab.h>
43830 +#include <linux/vmalloc.h>
43831 +#include <linux/types.h>
43832 +#include <linux/sysctl.h>
43833 +#include <linux/netdevice.h>
43834 +#include <linux/ptrace.h>
43835 +#include <linux/gracl.h>
43836 +#include <linux/gralloc.h>
43837 +#include <linux/grsecurity.h>
43838 +#include <linux/grinternal.h>
43839 +#include <linux/pid_namespace.h>
43840 +#include <linux/fdtable.h>
43841 +#include <linux/percpu.h>
43842 +
43843 +#include <asm/uaccess.h>
43844 +#include <asm/errno.h>
43845 +#include <asm/mman.h>
43846 +
43847 +static struct acl_role_db acl_role_set;
43848 +static struct name_db name_set;
43849 +static struct inodev_db inodev_set;
43850 +
43851 +/* for keeping track of userspace pointers used for subjects, so we
43852 + can share references in the kernel as well
43853 +*/
43854 +
43855 +static struct dentry *real_root;
43856 +static struct vfsmount *real_root_mnt;
43857 +
43858 +static struct acl_subj_map_db subj_map_set;
43859 +
43860 +static struct acl_role_label *default_role;
43861 +
43862 +static struct acl_role_label *role_list;
43863 +
43864 +static u16 acl_sp_role_value;
43865 +
43866 +extern char *gr_shared_page[4];
43867 +static DEFINE_MUTEX(gr_dev_mutex);
43868 +DEFINE_RWLOCK(gr_inode_lock);
43869 +
43870 +struct gr_arg *gr_usermode;
43871 +
43872 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
43873 +
43874 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
43875 +extern void gr_clear_learn_entries(void);
43876 +
43877 +#ifdef CONFIG_GRKERNSEC_RESLOG
43878 +extern void gr_log_resource(const struct task_struct *task,
43879 + const int res, const unsigned long wanted, const int gt);
43880 +#endif
43881 +
43882 +unsigned char *gr_system_salt;
43883 +unsigned char *gr_system_sum;
43884 +
43885 +static struct sprole_pw **acl_special_roles = NULL;
43886 +static __u16 num_sprole_pws = 0;
43887 +
43888 +static struct acl_role_label *kernel_role = NULL;
43889 +
43890 +static unsigned int gr_auth_attempts = 0;
43891 +static unsigned long gr_auth_expires = 0UL;
43892 +
43893 +#ifdef CONFIG_NET
43894 +extern struct vfsmount *sock_mnt;
43895 +#endif
43896 +extern struct vfsmount *pipe_mnt;
43897 +extern struct vfsmount *shm_mnt;
43898 +#ifdef CONFIG_HUGETLBFS
43899 +extern struct vfsmount *hugetlbfs_vfsmount;
43900 +#endif
43901 +
43902 +static struct acl_object_label *fakefs_obj_rw;
43903 +static struct acl_object_label *fakefs_obj_rwx;
43904 +
43905 +extern int gr_init_uidset(void);
43906 +extern void gr_free_uidset(void);
43907 +extern void gr_remove_uid(uid_t uid);
43908 +extern int gr_find_uid(uid_t uid);
43909 +
43910 +__inline__ int
43911 +gr_acl_is_enabled(void)
43912 +{
43913 + return (gr_status & GR_READY);
43914 +}
43915 +
43916 +#ifdef CONFIG_BTRFS_FS
43917 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
43918 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
43919 +#endif
43920 +
43921 +static inline dev_t __get_dev(const struct dentry *dentry)
43922 +{
43923 +#ifdef CONFIG_BTRFS_FS
43924 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
43925 + return get_btrfs_dev_from_inode(dentry->d_inode);
43926 + else
43927 +#endif
43928 + return dentry->d_inode->i_sb->s_dev;
43929 +}
43930 +
43931 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
43932 +{
43933 + return __get_dev(dentry);
43934 +}
43935 +
43936 +static char gr_task_roletype_to_char(struct task_struct *task)
43937 +{
43938 + switch (task->role->roletype &
43939 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
43940 + GR_ROLE_SPECIAL)) {
43941 + case GR_ROLE_DEFAULT:
43942 + return 'D';
43943 + case GR_ROLE_USER:
43944 + return 'U';
43945 + case GR_ROLE_GROUP:
43946 + return 'G';
43947 + case GR_ROLE_SPECIAL:
43948 + return 'S';
43949 + }
43950 +
43951 + return 'X';
43952 +}
43953 +
43954 +char gr_roletype_to_char(void)
43955 +{
43956 + return gr_task_roletype_to_char(current);
43957 +}
43958 +
43959 +__inline__ int
43960 +gr_acl_tpe_check(void)
43961 +{
43962 + if (unlikely(!(gr_status & GR_READY)))
43963 + return 0;
43964 + if (current->role->roletype & GR_ROLE_TPE)
43965 + return 1;
43966 + else
43967 + return 0;
43968 +}
43969 +
43970 +int
43971 +gr_handle_rawio(const struct inode *inode)
43972 +{
43973 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
43974 + if (inode && S_ISBLK(inode->i_mode) &&
43975 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
43976 + !capable(CAP_SYS_RAWIO))
43977 + return 1;
43978 +#endif
43979 + return 0;
43980 +}
43981 +
43982 +static int
43983 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
43984 +{
43985 + if (likely(lena != lenb))
43986 + return 0;
43987 +
43988 + return !memcmp(a, b, lena);
43989 +}
43990 +
43991 +/* this must be called with vfsmount_lock and dcache_lock held */
43992 +
43993 +static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
43994 + struct dentry *root, struct vfsmount *rootmnt,
43995 + char *buffer, int buflen)
43996 +{
43997 + char * end = buffer+buflen;
43998 + char * retval;
43999 + int namelen;
44000 +
44001 + *--end = '\0';
44002 + buflen--;
44003 +
44004 + if (buflen < 1)
44005 + goto Elong;
44006 + /* Get '/' right */
44007 + retval = end-1;
44008 + *retval = '/';
44009 +
44010 + for (;;) {
44011 + struct dentry * parent;
44012 +
44013 + if (dentry == root && vfsmnt == rootmnt)
44014 + break;
44015 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
44016 + /* Global root? */
44017 + if (vfsmnt->mnt_parent == vfsmnt)
44018 + goto global_root;
44019 + dentry = vfsmnt->mnt_mountpoint;
44020 + vfsmnt = vfsmnt->mnt_parent;
44021 + continue;
44022 + }
44023 + parent = dentry->d_parent;
44024 + prefetch(parent);
44025 + namelen = dentry->d_name.len;
44026 + buflen -= namelen + 1;
44027 + if (buflen < 0)
44028 + goto Elong;
44029 + end -= namelen;
44030 + memcpy(end, dentry->d_name.name, namelen);
44031 + *--end = '/';
44032 + retval = end;
44033 + dentry = parent;
44034 + }
44035 +
44036 +out:
44037 + return retval;
44038 +
44039 +global_root:
44040 + namelen = dentry->d_name.len;
44041 + buflen -= namelen;
44042 + if (buflen < 0)
44043 + goto Elong;
44044 + retval -= namelen-1; /* hit the slash */
44045 + memcpy(retval, dentry->d_name.name, namelen);
44046 + goto out;
44047 +Elong:
44048 + retval = ERR_PTR(-ENAMETOOLONG);
44049 + goto out;
44050 +}
44051 +
44052 +static char *
44053 +gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
44054 + struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
44055 +{
44056 + char *retval;
44057 +
44058 + retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
44059 + if (unlikely(IS_ERR(retval)))
44060 + retval = strcpy(buf, "<path too long>");
44061 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
44062 + retval[1] = '\0';
44063 +
44064 + return retval;
44065 +}
44066 +
44067 +static char *
44068 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44069 + char *buf, int buflen)
44070 +{
44071 + char *res;
44072 +
44073 + /* we can use real_root, real_root_mnt, because this is only called
44074 + by the RBAC system */
44075 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
44076 +
44077 + return res;
44078 +}
44079 +
44080 +static char *
44081 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44082 + char *buf, int buflen)
44083 +{
44084 + char *res;
44085 + struct dentry *root;
44086 + struct vfsmount *rootmnt;
44087 + struct task_struct *reaper = &init_task;
44088 +
44089 + /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
44090 + read_lock(&reaper->fs->lock);
44091 + root = dget(reaper->fs->root.dentry);
44092 + rootmnt = mntget(reaper->fs->root.mnt);
44093 + read_unlock(&reaper->fs->lock);
44094 +
44095 + spin_lock(&dcache_lock);
44096 + spin_lock(&vfsmount_lock);
44097 + res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
44098 + spin_unlock(&vfsmount_lock);
44099 + spin_unlock(&dcache_lock);
44100 +
44101 + dput(root);
44102 + mntput(rootmnt);
44103 + return res;
44104 +}
44105 +
44106 +static char *
44107 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
44108 +{
44109 + char *ret;
44110 + spin_lock(&dcache_lock);
44111 + spin_lock(&vfsmount_lock);
44112 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44113 + PAGE_SIZE);
44114 + spin_unlock(&vfsmount_lock);
44115 + spin_unlock(&dcache_lock);
44116 + return ret;
44117 +}
44118 +
44119 +char *
44120 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
44121 +{
44122 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44123 + PAGE_SIZE);
44124 +}
44125 +
44126 +char *
44127 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
44128 +{
44129 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
44130 + PAGE_SIZE);
44131 +}
44132 +
44133 +char *
44134 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
44135 +{
44136 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
44137 + PAGE_SIZE);
44138 +}
44139 +
44140 +char *
44141 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
44142 +{
44143 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
44144 + PAGE_SIZE);
44145 +}
44146 +
44147 +char *
44148 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
44149 +{
44150 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
44151 + PAGE_SIZE);
44152 +}
44153 +
44154 +__inline__ __u32
44155 +to_gr_audit(const __u32 reqmode)
44156 +{
44157 + /* masks off auditable permission flags, then shifts them to create
44158 + auditing flags, and adds the special case of append auditing if
44159 + we're requesting write */
44160 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
44161 +}
44162 +
44163 +struct acl_subject_label *
44164 +lookup_subject_map(const struct acl_subject_label *userp)
44165 +{
44166 + unsigned int index = shash(userp, subj_map_set.s_size);
44167 + struct subject_map *match;
44168 +
44169 + match = subj_map_set.s_hash[index];
44170 +
44171 + while (match && match->user != userp)
44172 + match = match->next;
44173 +
44174 + if (match != NULL)
44175 + return match->kernel;
44176 + else
44177 + return NULL;
44178 +}
44179 +
44180 +static void
44181 +insert_subj_map_entry(struct subject_map *subjmap)
44182 +{
44183 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
44184 + struct subject_map **curr;
44185 +
44186 + subjmap->prev = NULL;
44187 +
44188 + curr = &subj_map_set.s_hash[index];
44189 + if (*curr != NULL)
44190 + (*curr)->prev = subjmap;
44191 +
44192 + subjmap->next = *curr;
44193 + *curr = subjmap;
44194 +
44195 + return;
44196 +}
44197 +
44198 +static struct acl_role_label *
44199 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
44200 + const gid_t gid)
44201 +{
44202 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
44203 + struct acl_role_label *match;
44204 + struct role_allowed_ip *ipp;
44205 + unsigned int x;
44206 + u32 curr_ip = task->signal->curr_ip;
44207 +
44208 + task->signal->saved_ip = curr_ip;
44209 +
44210 + match = acl_role_set.r_hash[index];
44211 +
44212 + while (match) {
44213 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
44214 + for (x = 0; x < match->domain_child_num; x++) {
44215 + if (match->domain_children[x] == uid)
44216 + goto found;
44217 + }
44218 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
44219 + break;
44220 + match = match->next;
44221 + }
44222 +found:
44223 + if (match == NULL) {
44224 + try_group:
44225 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
44226 + match = acl_role_set.r_hash[index];
44227 +
44228 + while (match) {
44229 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
44230 + for (x = 0; x < match->domain_child_num; x++) {
44231 + if (match->domain_children[x] == gid)
44232 + goto found2;
44233 + }
44234 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
44235 + break;
44236 + match = match->next;
44237 + }
44238 +found2:
44239 + if (match == NULL)
44240 + match = default_role;
44241 + if (match->allowed_ips == NULL)
44242 + return match;
44243 + else {
44244 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44245 + if (likely
44246 + ((ntohl(curr_ip) & ipp->netmask) ==
44247 + (ntohl(ipp->addr) & ipp->netmask)))
44248 + return match;
44249 + }
44250 + match = default_role;
44251 + }
44252 + } else if (match->allowed_ips == NULL) {
44253 + return match;
44254 + } else {
44255 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44256 + if (likely
44257 + ((ntohl(curr_ip) & ipp->netmask) ==
44258 + (ntohl(ipp->addr) & ipp->netmask)))
44259 + return match;
44260 + }
44261 + goto try_group;
44262 + }
44263 +
44264 + return match;
44265 +}
44266 +
44267 +struct acl_subject_label *
44268 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
44269 + const struct acl_role_label *role)
44270 +{
44271 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44272 + struct acl_subject_label *match;
44273 +
44274 + match = role->subj_hash[index];
44275 +
44276 + while (match && (match->inode != ino || match->device != dev ||
44277 + (match->mode & GR_DELETED))) {
44278 + match = match->next;
44279 + }
44280 +
44281 + if (match && !(match->mode & GR_DELETED))
44282 + return match;
44283 + else
44284 + return NULL;
44285 +}
44286 +
44287 +struct acl_subject_label *
44288 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
44289 + const struct acl_role_label *role)
44290 +{
44291 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
44292 + struct acl_subject_label *match;
44293 +
44294 + match = role->subj_hash[index];
44295 +
44296 + while (match && (match->inode != ino || match->device != dev ||
44297 + !(match->mode & GR_DELETED))) {
44298 + match = match->next;
44299 + }
44300 +
44301 + if (match && (match->mode & GR_DELETED))
44302 + return match;
44303 + else
44304 + return NULL;
44305 +}
44306 +
44307 +static struct acl_object_label *
44308 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
44309 + const struct acl_subject_label *subj)
44310 +{
44311 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44312 + struct acl_object_label *match;
44313 +
44314 + match = subj->obj_hash[index];
44315 +
44316 + while (match && (match->inode != ino || match->device != dev ||
44317 + (match->mode & GR_DELETED))) {
44318 + match = match->next;
44319 + }
44320 +
44321 + if (match && !(match->mode & GR_DELETED))
44322 + return match;
44323 + else
44324 + return NULL;
44325 +}
44326 +
44327 +static struct acl_object_label *
44328 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
44329 + const struct acl_subject_label *subj)
44330 +{
44331 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
44332 + struct acl_object_label *match;
44333 +
44334 + match = subj->obj_hash[index];
44335 +
44336 + while (match && (match->inode != ino || match->device != dev ||
44337 + !(match->mode & GR_DELETED))) {
44338 + match = match->next;
44339 + }
44340 +
44341 + if (match && (match->mode & GR_DELETED))
44342 + return match;
44343 +
44344 + match = subj->obj_hash[index];
44345 +
44346 + while (match && (match->inode != ino || match->device != dev ||
44347 + (match->mode & GR_DELETED))) {
44348 + match = match->next;
44349 + }
44350 +
44351 + if (match && !(match->mode & GR_DELETED))
44352 + return match;
44353 + else
44354 + return NULL;
44355 +}
44356 +
44357 +static struct name_entry *
44358 +lookup_name_entry(const char *name)
44359 +{
44360 + unsigned int len = strlen(name);
44361 + unsigned int key = full_name_hash(name, len);
44362 + unsigned int index = key % name_set.n_size;
44363 + struct name_entry *match;
44364 +
44365 + match = name_set.n_hash[index];
44366 +
44367 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
44368 + match = match->next;
44369 +
44370 + return match;
44371 +}
44372 +
44373 +static struct name_entry *
44374 +lookup_name_entry_create(const char *name)
44375 +{
44376 + unsigned int len = strlen(name);
44377 + unsigned int key = full_name_hash(name, len);
44378 + unsigned int index = key % name_set.n_size;
44379 + struct name_entry *match;
44380 +
44381 + match = name_set.n_hash[index];
44382 +
44383 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44384 + !match->deleted))
44385 + match = match->next;
44386 +
44387 + if (match && match->deleted)
44388 + return match;
44389 +
44390 + match = name_set.n_hash[index];
44391 +
44392 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
44393 + match->deleted))
44394 + match = match->next;
44395 +
44396 + if (match && !match->deleted)
44397 + return match;
44398 + else
44399 + return NULL;
44400 +}
44401 +
44402 +static struct inodev_entry *
44403 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
44404 +{
44405 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
44406 + struct inodev_entry *match;
44407 +
44408 + match = inodev_set.i_hash[index];
44409 +
44410 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
44411 + match = match->next;
44412 +
44413 + return match;
44414 +}
44415 +
44416 +static void
44417 +insert_inodev_entry(struct inodev_entry *entry)
44418 +{
44419 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
44420 + inodev_set.i_size);
44421 + struct inodev_entry **curr;
44422 +
44423 + entry->prev = NULL;
44424 +
44425 + curr = &inodev_set.i_hash[index];
44426 + if (*curr != NULL)
44427 + (*curr)->prev = entry;
44428 +
44429 + entry->next = *curr;
44430 + *curr = entry;
44431 +
44432 + return;
44433 +}
44434 +
44435 +static void
44436 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
44437 +{
44438 + unsigned int index =
44439 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
44440 + struct acl_role_label **curr;
44441 + struct acl_role_label *tmp;
44442 +
44443 + curr = &acl_role_set.r_hash[index];
44444 +
44445 +	/* if the role was already inserted due to domains and already has
44446 +	   a role attached in the same bucket, then we need to
44447 +	   combine the two bucket chains
44448 +	 */
44449 + if (role->next) {
44450 + tmp = role->next;
44451 + while (tmp->next)
44452 + tmp = tmp->next;
44453 + tmp->next = *curr;
44454 + } else
44455 + role->next = *curr;
44456 + *curr = role;
44457 +
44458 + return;
44459 +}
44460 +
44461 +static void
44462 +insert_acl_role_label(struct acl_role_label *role)
44463 +{
44464 + int i;
44465 +
44466 + if (role_list == NULL) {
44467 + role_list = role;
44468 + role->prev = NULL;
44469 + } else {
44470 + role->prev = role_list;
44471 + role_list = role;
44472 + }
44473 +
44474 + /* used for hash chains */
44475 + role->next = NULL;
44476 +
44477 + if (role->roletype & GR_ROLE_DOMAIN) {
44478 + for (i = 0; i < role->domain_child_num; i++)
44479 + __insert_acl_role_label(role, role->domain_children[i]);
44480 + } else
44481 + __insert_acl_role_label(role, role->uidgid);
44482 +}
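+/* a role with GR_ROLE_DOMAIN set is hashed once per uid/gid listed in
+   domain_children[], so a lookup by any member id can find the same
+   acl_role_label; ordinary roles are hashed once under their own uidgid */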
44483 +
44484 +static int
44485 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
44486 +{
44487 + struct name_entry **curr, *nentry;
44488 + struct inodev_entry *ientry;
44489 + unsigned int len = strlen(name);
44490 + unsigned int key = full_name_hash(name, len);
44491 + unsigned int index = key % name_set.n_size;
44492 +
44493 + curr = &name_set.n_hash[index];
44494 +
44495 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
44496 + curr = &((*curr)->next);
44497 +
44498 + if (*curr != NULL)
44499 + return 1;
44500 +
44501 + nentry = acl_alloc(sizeof (struct name_entry));
44502 + if (nentry == NULL)
44503 + return 0;
44504 + ientry = acl_alloc(sizeof (struct inodev_entry));
44505 + if (ientry == NULL)
44506 + return 0;
44507 + ientry->nentry = nentry;
44508 +
44509 + nentry->key = key;
44510 + nentry->name = name;
44511 + nentry->inode = inode;
44512 + nentry->device = device;
44513 + nentry->len = len;
44514 + nentry->deleted = deleted;
44515 +
44516 + nentry->prev = NULL;
44517 + curr = &name_set.n_hash[index];
44518 + if (*curr != NULL)
44519 + (*curr)->prev = nentry;
44520 + nentry->next = *curr;
44521 + *curr = nentry;
44522 +
44523 + /* insert us into the table searchable by inode/dev */
44524 + insert_inodev_entry(ientry);
44525 +
44526 + return 1;
44527 +}
44528 +
44529 +static void
44530 +insert_acl_obj_label(struct acl_object_label *obj,
44531 + struct acl_subject_label *subj)
44532 +{
44533 + unsigned int index =
44534 + fhash(obj->inode, obj->device, subj->obj_hash_size);
44535 + struct acl_object_label **curr;
44536 +
44537 +
44538 + obj->prev = NULL;
44539 +
44540 + curr = &subj->obj_hash[index];
44541 + if (*curr != NULL)
44542 + (*curr)->prev = obj;
44543 +
44544 + obj->next = *curr;
44545 + *curr = obj;
44546 +
44547 + return;
44548 +}
44549 +
44550 +static void
44551 +insert_acl_subj_label(struct acl_subject_label *obj,
44552 + struct acl_role_label *role)
44553 +{
44554 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
44555 + struct acl_subject_label **curr;
44556 +
44557 + obj->prev = NULL;
44558 +
44559 + curr = &role->subj_hash[index];
44560 + if (*curr != NULL)
44561 + (*curr)->prev = obj;
44562 +
44563 + obj->next = *curr;
44564 + *curr = obj;
44565 +
44566 + return;
44567 +}
44568 +
44569 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
44570 +
44571 +static void *
44572 +create_table(__u32 * len, int elementsize)
44573 +{
44574 + unsigned int table_sizes[] = {
44575 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
44576 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
44577 + 4194301, 8388593, 16777213, 33554393, 67108859
44578 + };
44579 + void *newtable = NULL;
44580 + unsigned int pwr = 0;
44581 +
44582 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
44583 + table_sizes[pwr] <= *len)
44584 + pwr++;
44585 +
44586 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
44587 + return newtable;
44588 +
44589 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
44590 + newtable =
44591 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
44592 + else
44593 + newtable = vmalloc(table_sizes[pwr] * elementsize);
44594 +
44595 + *len = table_sizes[pwr];
44596 +
44597 + return newtable;
44598 +}
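+/* illustration with hypothetical values: create_table() rounds the request
+   up to the next prime in table_sizes[] so the chained hashes keep a load
+   factor near 1, e.g.:
+
+	__u32 size = 1000;	// want room for ~1000 entries
+	void **tbl = create_table(&size, sizeof(void *));
+	// on success, size is now 1021 (the next prime) and tbl has 1021
+	// slots, kmalloc'd if the table fits in a page, vmalloc'd otherwise
+*/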
44599 +
44600 +static int
44601 +init_variables(const struct gr_arg *arg)
44602 +{
44603 + struct task_struct *reaper = &init_task;
44604 + unsigned int stacksize;
44605 +
44606 + subj_map_set.s_size = arg->role_db.num_subjects;
44607 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
44608 + name_set.n_size = arg->role_db.num_objects;
44609 + inodev_set.i_size = arg->role_db.num_objects;
44610 +
44611 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
44612 + !name_set.n_size || !inodev_set.i_size)
44613 + return 1;
44614 +
44615 + if (!gr_init_uidset())
44616 + return 1;
44617 +
44618 + /* set up the stack that holds allocation info */
44619 +
44620 + stacksize = arg->role_db.num_pointers + 5;
44621 +
44622 + if (!acl_alloc_stack_init(stacksize))
44623 + return 1;
44624 +
44625 + /* grab reference for the real root dentry and vfsmount */
44626 + read_lock(&reaper->fs->lock);
44627 + real_root = dget(reaper->fs->root.dentry);
44628 + real_root_mnt = mntget(reaper->fs->root.mnt);
44629 + read_unlock(&reaper->fs->lock);
44630 +
44631 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44632 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
44633 +#endif
44634 +
44635 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
44636 + if (fakefs_obj_rw == NULL)
44637 + return 1;
44638 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
44639 +
44640 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
44641 + if (fakefs_obj_rwx == NULL)
44642 + return 1;
44643 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
44644 +
44645 + subj_map_set.s_hash =
44646 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
44647 + acl_role_set.r_hash =
44648 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
44649 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
44650 + inodev_set.i_hash =
44651 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
44652 +
44653 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
44654 + !name_set.n_hash || !inodev_set.i_hash)
44655 + return 1;
44656 +
44657 + memset(subj_map_set.s_hash, 0,
44658 + sizeof(struct subject_map *) * subj_map_set.s_size);
44659 + memset(acl_role_set.r_hash, 0,
44660 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
44661 + memset(name_set.n_hash, 0,
44662 + sizeof (struct name_entry *) * name_set.n_size);
44663 + memset(inodev_set.i_hash, 0,
44664 + sizeof (struct inodev_entry *) * inodev_set.i_size);
44665 +
44666 + return 0;
44667 +}
44668 +
44669 +/* free information not needed after startup;
44670 +   currently this is the user->kernel pointer mappings for subjects
44671 +*/
44672 +
44673 +static void
44674 +free_init_variables(void)
44675 +{
44676 + __u32 i;
44677 +
44678 + if (subj_map_set.s_hash) {
44679 + for (i = 0; i < subj_map_set.s_size; i++) {
44680 + if (subj_map_set.s_hash[i]) {
44681 + kfree(subj_map_set.s_hash[i]);
44682 + subj_map_set.s_hash[i] = NULL;
44683 + }
44684 + }
44685 +
44686 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
44687 + PAGE_SIZE)
44688 + kfree(subj_map_set.s_hash);
44689 + else
44690 + vfree(subj_map_set.s_hash);
44691 + }
44692 +
44693 + return;
44694 +}
44695 +
44696 +static void
44697 +free_variables(void)
44698 +{
44699 + struct acl_subject_label *s;
44700 + struct acl_role_label *r;
44701 + struct task_struct *task, *task2;
44702 + unsigned int x;
44703 +
44704 + gr_clear_learn_entries();
44705 +
44706 + read_lock(&tasklist_lock);
44707 + do_each_thread(task2, task) {
44708 + task->acl_sp_role = 0;
44709 + task->acl_role_id = 0;
44710 + task->acl = NULL;
44711 + task->role = NULL;
44712 + } while_each_thread(task2, task);
44713 + read_unlock(&tasklist_lock);
44714 +
44715 + /* release the reference to the real root dentry and vfsmount */
44716 + if (real_root)
44717 + dput(real_root);
44718 + real_root = NULL;
44719 + if (real_root_mnt)
44720 + mntput(real_root_mnt);
44721 + real_root_mnt = NULL;
44722 +
44723 + /* free all object hash tables */
44724 +
44725 + FOR_EACH_ROLE_START(r)
44726 + if (r->subj_hash == NULL)
44727 + goto next_role;
44728 + FOR_EACH_SUBJECT_START(r, s, x)
44729 + if (s->obj_hash == NULL)
44730 + break;
44731 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
44732 + kfree(s->obj_hash);
44733 + else
44734 + vfree(s->obj_hash);
44735 + FOR_EACH_SUBJECT_END(s, x)
44736 + FOR_EACH_NESTED_SUBJECT_START(r, s)
44737 + if (s->obj_hash == NULL)
44738 + break;
44739 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
44740 + kfree(s->obj_hash);
44741 + else
44742 + vfree(s->obj_hash);
44743 + FOR_EACH_NESTED_SUBJECT_END(s)
44744 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
44745 + kfree(r->subj_hash);
44746 + else
44747 + vfree(r->subj_hash);
44748 + r->subj_hash = NULL;
44749 +next_role:
44750 + FOR_EACH_ROLE_END(r)
44751 +
44752 + acl_free_all();
44753 +
44754 + if (acl_role_set.r_hash) {
44755 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
44756 + PAGE_SIZE)
44757 + kfree(acl_role_set.r_hash);
44758 + else
44759 + vfree(acl_role_set.r_hash);
44760 + }
44761 + if (name_set.n_hash) {
44762 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
44763 + PAGE_SIZE)
44764 + kfree(name_set.n_hash);
44765 + else
44766 + vfree(name_set.n_hash);
44767 + }
44768 +
44769 + if (inodev_set.i_hash) {
44770 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
44771 + PAGE_SIZE)
44772 + kfree(inodev_set.i_hash);
44773 + else
44774 + vfree(inodev_set.i_hash);
44775 + }
44776 +
44777 + gr_free_uidset();
44778 +
44779 + memset(&name_set, 0, sizeof (struct name_db));
44780 + memset(&inodev_set, 0, sizeof (struct inodev_db));
44781 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
44782 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
44783 +
44784 + default_role = NULL;
44785 + role_list = NULL;
44786 +
44787 + return;
44788 +}
44789 +
44790 +static __u32
44791 +count_user_objs(struct acl_object_label *userp)
44792 +{
44793 + struct acl_object_label o_tmp;
44794 + __u32 num = 0;
44795 +
44796 + while (userp) {
44797 + if (copy_from_user(&o_tmp, userp,
44798 + sizeof (struct acl_object_label)))
44799 + break;
44800 +
44801 + userp = o_tmp.prev;
44802 + num++;
44803 + }
44804 +
44805 + return num;
44806 +}
44807 +
44808 +static struct acl_subject_label *
44809 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
44810 +
44811 +static int
44812 +copy_user_glob(struct acl_object_label *obj)
44813 +{
44814 + struct acl_object_label *g_tmp, **guser;
44815 + unsigned int len;
44816 + char *tmp;
44817 +
44818 + if (obj->globbed == NULL)
44819 + return 0;
44820 +
44821 + guser = &obj->globbed;
44822 + while (*guser) {
44823 + g_tmp = (struct acl_object_label *)
44824 + acl_alloc(sizeof (struct acl_object_label));
44825 + if (g_tmp == NULL)
44826 + return -ENOMEM;
44827 +
44828 + if (copy_from_user(g_tmp, *guser,
44829 + sizeof (struct acl_object_label)))
44830 + return -EFAULT;
44831 +
44832 + len = strnlen_user(g_tmp->filename, PATH_MAX);
44833 +
44834 + if (!len || len >= PATH_MAX)
44835 + return -EINVAL;
44836 +
44837 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44838 + return -ENOMEM;
44839 +
44840 + if (copy_from_user(tmp, g_tmp->filename, len))
44841 + return -EFAULT;
44842 + tmp[len-1] = '\0';
44843 + g_tmp->filename = tmp;
44844 +
44845 + *guser = g_tmp;
44846 + guser = &(g_tmp->next);
44847 + }
44848 +
44849 + return 0;
44850 +}
44851 +
44852 +static int
44853 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
44854 + struct acl_role_label *role)
44855 +{
44856 + struct acl_object_label *o_tmp;
44857 + unsigned int len;
44858 + int ret;
44859 + char *tmp;
44860 +
44861 + while (userp) {
44862 + if ((o_tmp = (struct acl_object_label *)
44863 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
44864 + return -ENOMEM;
44865 +
44866 + if (copy_from_user(o_tmp, userp,
44867 + sizeof (struct acl_object_label)))
44868 + return -EFAULT;
44869 +
44870 + userp = o_tmp->prev;
44871 +
44872 + len = strnlen_user(o_tmp->filename, PATH_MAX);
44873 +
44874 + if (!len || len >= PATH_MAX)
44875 + return -EINVAL;
44876 +
44877 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44878 + return -ENOMEM;
44879 +
44880 + if (copy_from_user(tmp, o_tmp->filename, len))
44881 + return -EFAULT;
44882 + tmp[len-1] = '\0';
44883 + o_tmp->filename = tmp;
44884 +
44885 + insert_acl_obj_label(o_tmp, subj);
44886 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
44887 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
44888 + return -ENOMEM;
44889 +
44890 + ret = copy_user_glob(o_tmp);
44891 + if (ret)
44892 + return ret;
44893 +
44894 + if (o_tmp->nested) {
44895 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
44896 + if (IS_ERR(o_tmp->nested))
44897 + return PTR_ERR(o_tmp->nested);
44898 +
44899 + /* insert into nested subject list */
44900 + o_tmp->nested->next = role->hash->first;
44901 + role->hash->first = o_tmp->nested;
44902 + }
44903 + }
44904 +
44905 + return 0;
44906 +}
44907 +
44908 +static __u32
44909 +count_user_subjs(struct acl_subject_label *userp)
44910 +{
44911 + struct acl_subject_label s_tmp;
44912 + __u32 num = 0;
44913 +
44914 + while (userp) {
44915 + if (copy_from_user(&s_tmp, userp,
44916 + sizeof (struct acl_subject_label)))
44917 + break;
44918 +
44919 + userp = s_tmp.prev;
44920 + /* do not count nested subjects against this count, since
44921 + they are not included in the hash table, but are
44922 + attached to objects. We have already counted
44923 + the subjects in userspace for the allocation
44924 + stack
44925 + */
44926 + if (!(s_tmp.mode & GR_NESTED))
44927 + num++;
44928 + }
44929 +
44930 + return num;
44931 +}
44932 +
44933 +static int
44934 +copy_user_allowedips(struct acl_role_label *rolep)
44935 +{
44936 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
44937 +
44938 + ruserip = rolep->allowed_ips;
44939 +
44940 + while (ruserip) {
44941 + rlast = rtmp;
44942 +
44943 + if ((rtmp = (struct role_allowed_ip *)
44944 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
44945 + return -ENOMEM;
44946 +
44947 + if (copy_from_user(rtmp, ruserip,
44948 + sizeof (struct role_allowed_ip)))
44949 + return -EFAULT;
44950 +
44951 + ruserip = rtmp->prev;
44952 +
44953 + if (!rlast) {
44954 + rtmp->prev = NULL;
44955 + rolep->allowed_ips = rtmp;
44956 + } else {
44957 + rlast->next = rtmp;
44958 + rtmp->prev = rlast;
44959 + }
44960 +
44961 + if (!ruserip)
44962 + rtmp->next = NULL;
44963 + }
44964 +
44965 + return 0;
44966 +}
44967 +
44968 +static int
44969 +copy_user_transitions(struct acl_role_label *rolep)
44970 +{
44971 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
44972 +
44973 + unsigned int len;
44974 + char *tmp;
44975 +
44976 + rusertp = rolep->transitions;
44977 +
44978 + while (rusertp) {
44979 + rlast = rtmp;
44980 +
44981 + if ((rtmp = (struct role_transition *)
44982 + acl_alloc(sizeof (struct role_transition))) == NULL)
44983 + return -ENOMEM;
44984 +
44985 + if (copy_from_user(rtmp, rusertp,
44986 + sizeof (struct role_transition)))
44987 + return -EFAULT;
44988 +
44989 + rusertp = rtmp->prev;
44990 +
44991 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
44992 +
44993 + if (!len || len >= GR_SPROLE_LEN)
44994 + return -EINVAL;
44995 +
44996 + if ((tmp = (char *) acl_alloc(len)) == NULL)
44997 + return -ENOMEM;
44998 +
44999 + if (copy_from_user(tmp, rtmp->rolename, len))
45000 + return -EFAULT;
45001 + tmp[len-1] = '\0';
45002 + rtmp->rolename = tmp;
45003 +
45004 + if (!rlast) {
45005 + rtmp->prev = NULL;
45006 + rolep->transitions = rtmp;
45007 + } else {
45008 + rlast->next = rtmp;
45009 + rtmp->prev = rlast;
45010 + }
45011 +
45012 + if (!rusertp)
45013 + rtmp->next = NULL;
45014 + }
45015 +
45016 + return 0;
45017 +}
45018 +
45019 +static struct acl_subject_label *
45020 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
45021 +{
45022 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
45023 + unsigned int len;
45024 + char *tmp;
45025 + __u32 num_objs;
45026 + struct acl_ip_label **i_tmp, *i_utmp2;
45027 + struct gr_hash_struct ghash;
45028 + struct subject_map *subjmap;
45029 + unsigned int i_num;
45030 + int err;
45031 +
45032 + s_tmp = lookup_subject_map(userp);
45033 +
45034 + /* we've already copied this subject into the kernel, just return
45035 + the reference to it, and don't copy it over again
45036 + */
45037 + if (s_tmp)
45038 + return(s_tmp);
45039 +
45040 + if ((s_tmp = (struct acl_subject_label *)
45041 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
45042 + return ERR_PTR(-ENOMEM);
45043 +
45044 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
45045 + if (subjmap == NULL)
45046 + return ERR_PTR(-ENOMEM);
45047 +
45048 + subjmap->user = userp;
45049 + subjmap->kernel = s_tmp;
45050 + insert_subj_map_entry(subjmap);
45051 +
45052 + if (copy_from_user(s_tmp, userp,
45053 + sizeof (struct acl_subject_label)))
45054 + return ERR_PTR(-EFAULT);
45055 +
45056 + len = strnlen_user(s_tmp->filename, PATH_MAX);
45057 +
45058 + if (!len || len >= PATH_MAX)
45059 + return ERR_PTR(-EINVAL);
45060 +
45061 + if ((tmp = (char *) acl_alloc(len)) == NULL)
45062 + return ERR_PTR(-ENOMEM);
45063 +
45064 + if (copy_from_user(tmp, s_tmp->filename, len))
45065 + return ERR_PTR(-EFAULT);
45066 + tmp[len-1] = '\0';
45067 + s_tmp->filename = tmp;
45068 +
45069 + if (!strcmp(s_tmp->filename, "/"))
45070 + role->root_label = s_tmp;
45071 +
45072 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
45073 + return ERR_PTR(-EFAULT);
45074 +
45075 + /* copy user and group transition tables */
45076 +
45077 + if (s_tmp->user_trans_num) {
45078 + uid_t *uidlist;
45079 +
45080 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
45081 + if (uidlist == NULL)
45082 + return ERR_PTR(-ENOMEM);
45083 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
45084 + return ERR_PTR(-EFAULT);
45085 +
45086 + s_tmp->user_transitions = uidlist;
45087 + }
45088 +
45089 + if (s_tmp->group_trans_num) {
45090 + gid_t *gidlist;
45091 +
45092 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
45093 + if (gidlist == NULL)
45094 + return ERR_PTR(-ENOMEM);
45095 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
45096 + return ERR_PTR(-EFAULT);
45097 +
45098 + s_tmp->group_transitions = gidlist;
45099 + }
45100 +
45101 + /* set up object hash table */
45102 + num_objs = count_user_objs(ghash.first);
45103 +
45104 + s_tmp->obj_hash_size = num_objs;
45105 + s_tmp->obj_hash =
45106 + (struct acl_object_label **)
45107 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
45108 +
45109 + if (!s_tmp->obj_hash)
45110 + return ERR_PTR(-ENOMEM);
45111 +
45112 + memset(s_tmp->obj_hash, 0,
45113 + s_tmp->obj_hash_size *
45114 + sizeof (struct acl_object_label *));
45115 +
45116 + /* add in objects */
45117 + err = copy_user_objs(ghash.first, s_tmp, role);
45118 +
45119 + if (err)
45120 + return ERR_PTR(err);
45121 +
45122 + /* set pointer for parent subject */
45123 + if (s_tmp->parent_subject) {
45124 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
45125 +
45126 + if (IS_ERR(s_tmp2))
45127 + return s_tmp2;
45128 +
45129 + s_tmp->parent_subject = s_tmp2;
45130 + }
45131 +
45132 + /* add in ip acls */
45133 +
45134 + if (!s_tmp->ip_num) {
45135 + s_tmp->ips = NULL;
45136 + goto insert;
45137 + }
45138 +
45139 + i_tmp =
45140 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
45141 + sizeof (struct acl_ip_label *));
45142 +
45143 + if (!i_tmp)
45144 + return ERR_PTR(-ENOMEM);
45145 +
45146 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
45147 + *(i_tmp + i_num) =
45148 + (struct acl_ip_label *)
45149 + acl_alloc(sizeof (struct acl_ip_label));
45150 + if (!*(i_tmp + i_num))
45151 + return ERR_PTR(-ENOMEM);
45152 +
45153 + if (copy_from_user
45154 + (&i_utmp2, s_tmp->ips + i_num,
45155 + sizeof (struct acl_ip_label *)))
45156 + return ERR_PTR(-EFAULT);
45157 +
45158 + if (copy_from_user
45159 + (*(i_tmp + i_num), i_utmp2,
45160 + sizeof (struct acl_ip_label)))
45161 + return ERR_PTR(-EFAULT);
45162 +
45163 + if ((*(i_tmp + i_num))->iface == NULL)
45164 + continue;
45165 +
45166 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
45167 + if (!len || len >= IFNAMSIZ)
45168 + return ERR_PTR(-EINVAL);
45169 + tmp = acl_alloc(len);
45170 + if (tmp == NULL)
45171 + return ERR_PTR(-ENOMEM);
45172 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
45173 + return ERR_PTR(-EFAULT);
45174 + (*(i_tmp + i_num))->iface = tmp;
45175 + }
45176 +
45177 + s_tmp->ips = i_tmp;
45178 +
45179 +insert:
45180 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
45181 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
45182 + return ERR_PTR(-ENOMEM);
45183 +
45184 + return s_tmp;
45185 +}
45186 +
45187 +static int
45188 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
45189 +{
45190 + struct acl_subject_label s_pre;
45191 + struct acl_subject_label * ret;
45192 + int err;
45193 +
45194 + while (userp) {
45195 + if (copy_from_user(&s_pre, userp,
45196 + sizeof (struct acl_subject_label)))
45197 + return -EFAULT;
45198 +
45199 + /* do not add nested subjects here, add
45200 + while parsing objects
45201 + */
45202 +
45203 + if (s_pre.mode & GR_NESTED) {
45204 + userp = s_pre.prev;
45205 + continue;
45206 + }
45207 +
45208 + ret = do_copy_user_subj(userp, role);
45209 +
45210 + err = PTR_ERR(ret);
45211 + if (IS_ERR(ret))
45212 + return err;
45213 +
45214 + insert_acl_subj_label(ret, role);
45215 +
45216 + userp = s_pre.prev;
45217 + }
45218 +
45219 + return 0;
45220 +}
45221 +
45222 +static int
45223 +copy_user_acl(struct gr_arg *arg)
45224 +{
45225 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
45226 + struct sprole_pw *sptmp;
45227 + struct gr_hash_struct *ghash;
45228 + uid_t *domainlist;
45229 + unsigned int r_num;
45230 + unsigned int len;
45231 + char *tmp;
45232 + int err = 0;
45233 + __u16 i;
45234 + __u32 num_subjs;
45235 +
45236 + /* we need a default and kernel role */
45237 + if (arg->role_db.num_roles < 2)
45238 + return -EINVAL;
45239 +
45240 + /* copy special role authentication info from userspace */
45241 +
45242 + num_sprole_pws = arg->num_sprole_pws;
45243 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
45244 +
45245 + if (!acl_special_roles) {
45246 + err = -ENOMEM;
45247 + goto cleanup;
45248 + }
45249 +
45250 + for (i = 0; i < num_sprole_pws; i++) {
45251 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
45252 + if (!sptmp) {
45253 + err = -ENOMEM;
45254 + goto cleanup;
45255 + }
45256 + if (copy_from_user(sptmp, arg->sprole_pws + i,
45257 + sizeof (struct sprole_pw))) {
45258 + err = -EFAULT;
45259 + goto cleanup;
45260 + }
45261 +
45262 + len =
45263 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
45264 +
45265 + if (!len || len >= GR_SPROLE_LEN) {
45266 + err = -EINVAL;
45267 + goto cleanup;
45268 + }
45269 +
45270 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
45271 + err = -ENOMEM;
45272 + goto cleanup;
45273 + }
45274 +
45275 + if (copy_from_user(tmp, sptmp->rolename, len)) {
45276 + err = -EFAULT;
45277 + goto cleanup;
45278 + }
45279 + tmp[len-1] = '\0';
45280 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45281 + printk(KERN_ALERT "Copying special role %s\n", tmp);
45282 +#endif
45283 + sptmp->rolename = tmp;
45284 + acl_special_roles[i] = sptmp;
45285 + }
45286 +
45287 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
45288 +
45289 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
45290 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
45291 +
45292 + if (!r_tmp) {
45293 + err = -ENOMEM;
45294 + goto cleanup;
45295 + }
45296 +
45297 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
45298 + sizeof (struct acl_role_label *))) {
45299 + err = -EFAULT;
45300 + goto cleanup;
45301 + }
45302 +
45303 + if (copy_from_user(r_tmp, r_utmp2,
45304 + sizeof (struct acl_role_label))) {
45305 + err = -EFAULT;
45306 + goto cleanup;
45307 + }
45308 +
45309 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
45310 +
45311 +		if (!len || len >= GR_SPROLE_LEN) {
45312 + err = -EINVAL;
45313 + goto cleanup;
45314 + }
45315 +
45316 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
45317 + err = -ENOMEM;
45318 + goto cleanup;
45319 + }
45320 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
45321 + err = -EFAULT;
45322 + goto cleanup;
45323 + }
45324 + tmp[len-1] = '\0';
45325 + r_tmp->rolename = tmp;
45326 +
45327 + if (!strcmp(r_tmp->rolename, "default")
45328 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
45329 + default_role = r_tmp;
45330 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
45331 + kernel_role = r_tmp;
45332 + }
45333 +
45334 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
45335 + err = -ENOMEM;
45336 + goto cleanup;
45337 + }
45338 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
45339 + err = -EFAULT;
45340 + goto cleanup;
45341 + }
45342 +
45343 + r_tmp->hash = ghash;
45344 +
45345 + num_subjs = count_user_subjs(r_tmp->hash->first);
45346 +
45347 + r_tmp->subj_hash_size = num_subjs;
45348 + r_tmp->subj_hash =
45349 + (struct acl_subject_label **)
45350 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
45351 +
45352 + if (!r_tmp->subj_hash) {
45353 + err = -ENOMEM;
45354 + goto cleanup;
45355 + }
45356 +
45357 + err = copy_user_allowedips(r_tmp);
45358 + if (err)
45359 + goto cleanup;
45360 +
45361 + /* copy domain info */
45362 + if (r_tmp->domain_children != NULL) {
45363 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
45364 + if (domainlist == NULL) {
45365 + err = -ENOMEM;
45366 + goto cleanup;
45367 + }
45368 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
45369 + err = -EFAULT;
45370 + goto cleanup;
45371 + }
45372 + r_tmp->domain_children = domainlist;
45373 + }
45374 +
45375 + err = copy_user_transitions(r_tmp);
45376 + if (err)
45377 + goto cleanup;
45378 +
45379 + memset(r_tmp->subj_hash, 0,
45380 + r_tmp->subj_hash_size *
45381 + sizeof (struct acl_subject_label *));
45382 +
45383 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
45384 +
45385 + if (err)
45386 + goto cleanup;
45387 +
45388 + /* set nested subject list to null */
45389 + r_tmp->hash->first = NULL;
45390 +
45391 + insert_acl_role_label(r_tmp);
45392 + }
45393 +
45394 + goto return_err;
45395 + cleanup:
45396 + free_variables();
45397 + return_err:
45398 + return err;
45399 +
45400 +}
45401 +
45402 +static int
45403 +gracl_init(struct gr_arg *args)
45404 +{
45405 + int error = 0;
45406 +
45407 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
45408 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
45409 +
45410 + if (init_variables(args)) {
45411 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
45412 + error = -ENOMEM;
45413 + free_variables();
45414 + goto out;
45415 + }
45416 +
45417 + error = copy_user_acl(args);
45418 + free_init_variables();
45419 + if (error) {
45420 + free_variables();
45421 + goto out;
45422 + }
45423 +
45424 + if ((error = gr_set_acls(0))) {
45425 + free_variables();
45426 + goto out;
45427 + }
45428 +
45429 + pax_open_kernel();
45430 + gr_status |= GR_READY;
45431 + pax_close_kernel();
45432 +
45433 + out:
45434 + return error;
45435 +}
45436 +
45437 +/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
45438 +
45439 +static int
45440 +glob_match(const char *p, const char *n)
45441 +{
45442 + char c;
45443 +
45444 + while ((c = *p++) != '\0') {
45445 + switch (c) {
45446 + case '?':
45447 + if (*n == '\0')
45448 + return 1;
45449 + else if (*n == '/')
45450 + return 1;
45451 + break;
45452 + case '\\':
45453 + if (*n != c)
45454 + return 1;
45455 + break;
45456 + case '*':
45457 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
45458 + if (*n == '/')
45459 + return 1;
45460 + else if (c == '?') {
45461 + if (*n == '\0')
45462 + return 1;
45463 + else
45464 + ++n;
45465 + }
45466 + }
45467 + if (c == '\0') {
45468 + return 0;
45469 + } else {
45470 + const char *endp;
45471 +
45472 + if ((endp = strchr(n, '/')) == NULL)
45473 + endp = n + strlen(n);
45474 +
45475 + if (c == '[') {
45476 + for (--p; n < endp; ++n)
45477 + if (!glob_match(p, n))
45478 + return 0;
45479 + } else if (c == '/') {
45480 + while (*n != '\0' && *n != '/')
45481 + ++n;
45482 + if (*n == '/' && !glob_match(p, n + 1))
45483 + return 0;
45484 + } else {
45485 + for (--p; n < endp; ++n)
45486 + if (*n == c && !glob_match(p, n))
45487 + return 0;
45488 + }
45489 +
45490 + return 1;
45491 + }
45492 + case '[':
45493 + {
45494 + int not;
45495 + char cold;
45496 +
45497 + if (*n == '\0' || *n == '/')
45498 + return 1;
45499 +
45500 + not = (*p == '!' || *p == '^');
45501 + if (not)
45502 + ++p;
45503 +
45504 + c = *p++;
45505 + for (;;) {
45506 + unsigned char fn = (unsigned char)*n;
45507 +
45508 + if (c == '\0')
45509 + return 1;
45510 + else {
45511 + if (c == fn)
45512 + goto matched;
45513 + cold = c;
45514 + c = *p++;
45515 +
45516 + if (c == '-' && *p != ']') {
45517 + unsigned char cend = *p++;
45518 +
45519 + if (cend == '\0')
45520 + return 1;
45521 +
45522 + if (cold <= fn && fn <= cend)
45523 + goto matched;
45524 +
45525 + c = *p++;
45526 + }
45527 + }
45528 +
45529 + if (c == ']')
45530 + break;
45531 + }
45532 + if (!not)
45533 + return 1;
45534 + break;
45535 + matched:
45536 + while (c != ']') {
45537 + if (c == '\0')
45538 + return 1;
45539 +
45540 + c = *p++;
45541 + }
45542 + if (not)
45543 + return 1;
45544 + }
45545 + break;
45546 + default:
45547 + if (c != *n)
45548 + return 1;
45549 + }
45550 +
45551 + ++n;
45552 + }
45553 +
45554 + if (*n == '\0')
45555 + return 0;
45556 +
45557 + if (*n == '/')
45558 + return 0;
45559 +
45560 + return 1;
45561 +}
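+/* illustration with hypothetical patterns and paths: glob_match() keeps
+   fnmatch()'s return convention, 0 on match and 1 on mismatch, and '?'
+   never matches a path separator:
+
+	glob_match("/tmp/??.log", "/tmp/ab.log");	// 0: match
+	glob_match("/tmp/??.log", "/tmp/a/b.log");	// 1: '?' will not match '/'
+*/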
45562 +
45563 +static struct acl_object_label *
45564 +chk_glob_label(struct acl_object_label *globbed,
45565 + struct dentry *dentry, struct vfsmount *mnt, char **path)
45566 +{
45567 + struct acl_object_label *tmp;
45568 +
45569 + if (*path == NULL)
45570 + *path = gr_to_filename_nolock(dentry, mnt);
45571 +
45572 + tmp = globbed;
45573 +
45574 + while (tmp) {
45575 + if (!glob_match(tmp->filename, *path))
45576 + return tmp;
45577 + tmp = tmp->next;
45578 + }
45579 +
45580 + return NULL;
45581 +}
45582 +
45583 +static struct acl_object_label *
45584 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45585 + const ino_t curr_ino, const dev_t curr_dev,
45586 + const struct acl_subject_label *subj, char **path, const int checkglob)
45587 +{
45588 + struct acl_subject_label *tmpsubj;
45589 + struct acl_object_label *retval;
45590 + struct acl_object_label *retval2;
45591 +
45592 + tmpsubj = (struct acl_subject_label *) subj;
45593 + read_lock(&gr_inode_lock);
45594 + do {
45595 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
45596 + if (retval) {
45597 + if (checkglob && retval->globbed) {
45598 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
45599 + (struct vfsmount *)orig_mnt, path);
45600 + if (retval2)
45601 + retval = retval2;
45602 + }
45603 + break;
45604 + }
45605 + } while ((tmpsubj = tmpsubj->parent_subject));
45606 + read_unlock(&gr_inode_lock);
45607 +
45608 + return retval;
45609 +}
45610 +
45611 +static __inline__ struct acl_object_label *
45612 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
45613 + const struct dentry *curr_dentry,
45614 + const struct acl_subject_label *subj, char **path, const int checkglob)
45615 +{
45616 + int newglob = checkglob;
45617 +
45618 +	/* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
45619 +	   as we don't want a / * rule to match instead of the / object.
45620 +	   Don't do this for create lookups that call this function, though, since they look up
45621 +	   on the parent and thus need globbing checks on all paths.
45622 +	*/
45623 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
45624 + newglob = GR_NO_GLOB;
45625 +
45626 + return __full_lookup(orig_dentry, orig_mnt,
45627 + curr_dentry->d_inode->i_ino,
45628 + __get_dev(curr_dentry), subj, path, newglob);
45629 +}
45630 +
45631 +static struct acl_object_label *
45632 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45633 + const struct acl_subject_label *subj, char *path, const int checkglob)
45634 +{
45635 + struct dentry *dentry = (struct dentry *) l_dentry;
45636 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45637 + struct acl_object_label *retval;
45638 +
45639 + spin_lock(&dcache_lock);
45640 + spin_lock(&vfsmount_lock);
45641 +
45642 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
45643 +#ifdef CONFIG_NET
45644 + mnt == sock_mnt ||
45645 +#endif
45646 +#ifdef CONFIG_HUGETLBFS
45647 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
45648 +#endif
45649 + /* ignore Eric Biederman */
45650 + IS_PRIVATE(l_dentry->d_inode))) {
45651 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
45652 + goto out;
45653 + }
45654 +
45655 + for (;;) {
45656 + if (dentry == real_root && mnt == real_root_mnt)
45657 + break;
45658 +
45659 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45660 + if (mnt->mnt_parent == mnt)
45661 + break;
45662 +
45663 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45664 + if (retval != NULL)
45665 + goto out;
45666 +
45667 + dentry = mnt->mnt_mountpoint;
45668 + mnt = mnt->mnt_parent;
45669 + continue;
45670 + }
45671 +
45672 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45673 + if (retval != NULL)
45674 + goto out;
45675 +
45676 + dentry = dentry->d_parent;
45677 + }
45678 +
45679 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
45680 +
45681 + if (retval == NULL)
45682 + retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
45683 +out:
45684 + spin_unlock(&vfsmount_lock);
45685 + spin_unlock(&dcache_lock);
45686 +
45687 + BUG_ON(retval == NULL);
45688 +
45689 + return retval;
45690 +}
45691 +
45692 +static __inline__ struct acl_object_label *
45693 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45694 + const struct acl_subject_label *subj)
45695 +{
45696 + char *path = NULL;
45697 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
45698 +}
45699 +
45700 +static __inline__ struct acl_object_label *
45701 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45702 + const struct acl_subject_label *subj)
45703 +{
45704 + char *path = NULL;
45705 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
45706 +}
45707 +
45708 +static __inline__ struct acl_object_label *
45709 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45710 + const struct acl_subject_label *subj, char *path)
45711 +{
45712 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
45713 +}
45714 +
45715 +static struct acl_subject_label *
45716 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
45717 + const struct acl_role_label *role)
45718 +{
45719 + struct dentry *dentry = (struct dentry *) l_dentry;
45720 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
45721 + struct acl_subject_label *retval;
45722 +
45723 + spin_lock(&dcache_lock);
45724 + spin_lock(&vfsmount_lock);
45725 +
45726 + for (;;) {
45727 + if (dentry == real_root && mnt == real_root_mnt)
45728 + break;
45729 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
45730 + if (mnt->mnt_parent == mnt)
45731 + break;
45732 +
45733 + read_lock(&gr_inode_lock);
45734 + retval =
45735 + lookup_acl_subj_label(dentry->d_inode->i_ino,
45736 + __get_dev(dentry), role);
45737 + read_unlock(&gr_inode_lock);
45738 + if (retval != NULL)
45739 + goto out;
45740 +
45741 + dentry = mnt->mnt_mountpoint;
45742 + mnt = mnt->mnt_parent;
45743 + continue;
45744 + }
45745 +
45746 + read_lock(&gr_inode_lock);
45747 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
45748 + __get_dev(dentry), role);
45749 + read_unlock(&gr_inode_lock);
45750 + if (retval != NULL)
45751 + goto out;
45752 +
45753 + dentry = dentry->d_parent;
45754 + }
45755 +
45756 + read_lock(&gr_inode_lock);
45757 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
45758 + __get_dev(dentry), role);
45759 + read_unlock(&gr_inode_lock);
45760 +
45761 + if (unlikely(retval == NULL)) {
45762 + read_lock(&gr_inode_lock);
45763 + retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
45764 + __get_dev(real_root), role);
45765 + read_unlock(&gr_inode_lock);
45766 + }
45767 +out:
45768 + spin_unlock(&vfsmount_lock);
45769 + spin_unlock(&dcache_lock);
45770 +
45771 + BUG_ON(retval == NULL);
45772 +
45773 + return retval;
45774 +}
45775 +
45776 +static void
45777 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
45778 +{
45779 + struct task_struct *task = current;
45780 + const struct cred *cred = current_cred();
45781 +
45782 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
45783 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45784 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45785 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
45786 +
45787 + return;
45788 +}
45789 +
45790 +static void
45791 +gr_log_learn_sysctl(const char *path, const __u32 mode)
45792 +{
45793 + struct task_struct *task = current;
45794 + const struct cred *cred = current_cred();
45795 +
45796 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
45797 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45798 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45799 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
45800 +
45801 + return;
45802 +}
45803 +
45804 +static void
45805 +gr_log_learn_id_change(const char type, const unsigned int real,
45806 + const unsigned int effective, const unsigned int fs)
45807 +{
45808 + struct task_struct *task = current;
45809 + const struct cred *cred = current_cred();
45810 +
45811 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
45812 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
45813 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
45814 + type, real, effective, fs, &task->signal->saved_ip);
45815 +
45816 + return;
45817 +}
45818 +
45819 +__u32
45820 +gr_check_link(const struct dentry * new_dentry,
45821 + const struct dentry * parent_dentry,
45822 + const struct vfsmount * parent_mnt,
45823 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
45824 +{
45825 + struct acl_object_label *obj;
45826 + __u32 oldmode, newmode;
45827 + __u32 needmode;
45828 +
45829 + if (unlikely(!(gr_status & GR_READY)))
45830 + return (GR_CREATE | GR_LINK);
45831 +
45832 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
45833 + oldmode = obj->mode;
45834 +
45835 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45836 + oldmode |= (GR_CREATE | GR_LINK);
45837 +
45838 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
45839 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
45840 + needmode |= GR_SETID | GR_AUDIT_SETID;
45841 +
45842 + newmode =
45843 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
45844 + oldmode | needmode);
45845 +
45846 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
45847 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
45848 + GR_INHERIT | GR_AUDIT_INHERIT);
45849 +
45850 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
45851 + goto bad;
45852 +
45853 + if ((oldmode & needmode) != needmode)
45854 + goto bad;
45855 +
45856 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
45857 + if ((newmode & needmode) != needmode)
45858 + goto bad;
45859 +
45860 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
45861 + return newmode;
45862 +bad:
45863 + needmode = oldmode;
45864 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
45865 + needmode |= GR_SETID;
45866 +
45867 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
45868 + gr_log_learn(old_dentry, old_mnt, needmode);
45869 + return (GR_CREATE | GR_LINK);
45870 + } else if (newmode & GR_SUPPRESS)
45871 + return GR_SUPPRESS;
45872 + else
45873 + return 0;
45874 +}
45875 +
45876 +__u32
45877 +gr_search_file(const struct dentry * dentry, const __u32 mode,
45878 + const struct vfsmount * mnt)
45879 +{
45880 + __u32 retval = mode;
45881 + struct acl_subject_label *curracl;
45882 + struct acl_object_label *currobj;
45883 +
45884 + if (unlikely(!(gr_status & GR_READY)))
45885 + return (mode & ~GR_AUDITS);
45886 +
45887 + curracl = current->acl;
45888 +
45889 + currobj = chk_obj_label(dentry, mnt, curracl);
45890 + retval = currobj->mode & mode;
45891 +
45892 + /* if we're opening a specified transfer file for writing
45893 + (e.g. /dev/initctl), then transfer our role to init
45894 + */
45895 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
45896 + current->role->roletype & GR_ROLE_PERSIST)) {
45897 + struct task_struct *task = init_pid_ns.child_reaper;
45898 +
45899 + if (task->role != current->role) {
45900 + task->acl_sp_role = 0;
45901 + task->acl_role_id = current->acl_role_id;
45902 + task->role = current->role;
45903 + rcu_read_lock();
45904 + read_lock(&grsec_exec_file_lock);
45905 + gr_apply_subject_to_task(task);
45906 + read_unlock(&grsec_exec_file_lock);
45907 + rcu_read_unlock();
45908 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
45909 + }
45910 + }
45911 +
45912 + if (unlikely
45913 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
45914 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
45915 + __u32 new_mode = mode;
45916 +
45917 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45918 +
45919 + retval = new_mode;
45920 +
45921 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
45922 + new_mode |= GR_INHERIT;
45923 +
45924 + if (!(mode & GR_NOLEARN))
45925 + gr_log_learn(dentry, mnt, new_mode);
45926 + }
45927 +
45928 + return retval;
45929 +}
45930 +
45931 +__u32
45932 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
45933 + const struct vfsmount * mnt, const __u32 mode)
45934 +{
45935 + struct name_entry *match;
45936 + struct acl_object_label *matchpo;
45937 + struct acl_subject_label *curracl;
45938 + char *path;
45939 + __u32 retval;
45940 +
45941 + if (unlikely(!(gr_status & GR_READY)))
45942 + return (mode & ~GR_AUDITS);
45943 +
45944 + preempt_disable();
45945 + path = gr_to_filename_rbac(new_dentry, mnt);
45946 + match = lookup_name_entry_create(path);
45947 +
45948 + if (!match)
45949 + goto check_parent;
45950 +
45951 + curracl = current->acl;
45952 +
45953 + read_lock(&gr_inode_lock);
45954 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
45955 + read_unlock(&gr_inode_lock);
45956 +
45957 + if (matchpo) {
45958 + if ((matchpo->mode & mode) !=
45959 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
45960 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
45961 + __u32 new_mode = mode;
45962 +
45963 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45964 +
45965 + gr_log_learn(new_dentry, mnt, new_mode);
45966 +
45967 + preempt_enable();
45968 + return new_mode;
45969 + }
45970 + preempt_enable();
45971 + return (matchpo->mode & mode);
45972 + }
45973 +
45974 + check_parent:
45975 + curracl = current->acl;
45976 +
45977 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
45978 + retval = matchpo->mode & mode;
45979 +
45980 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
45981 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
45982 + __u32 new_mode = mode;
45983 +
45984 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45985 +
45986 + gr_log_learn(new_dentry, mnt, new_mode);
45987 + preempt_enable();
45988 + return new_mode;
45989 + }
45990 +
45991 + preempt_enable();
45992 + return retval;
45993 +}
45994 +
45995 +int
45996 +gr_check_hidden_task(const struct task_struct *task)
45997 +{
45998 + if (unlikely(!(gr_status & GR_READY)))
45999 + return 0;
46000 +
46001 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
46002 + return 1;
46003 +
46004 + return 0;
46005 +}
46006 +
46007 +int
46008 +gr_check_protected_task(const struct task_struct *task)
46009 +{
46010 + if (unlikely(!(gr_status & GR_READY) || !task))
46011 + return 0;
46012 +
46013 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46014 + task->acl != current->acl)
46015 + return 1;
46016 +
46017 + return 0;
46018 +}
46019 +
46020 +int
46021 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
46022 +{
46023 + struct task_struct *p;
46024 + int ret = 0;
46025 +
46026 + if (unlikely(!(gr_status & GR_READY) || !pid))
46027 + return ret;
46028 +
46029 + read_lock(&tasklist_lock);
46030 + do_each_pid_task(pid, type, p) {
46031 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46032 + p->acl != current->acl) {
46033 + ret = 1;
46034 + goto out;
46035 + }
46036 + } while_each_pid_task(pid, type, p);
46037 +out:
46038 + read_unlock(&tasklist_lock);
46039 +
46040 + return ret;
46041 +}
46042 +
46043 +void
46044 +gr_copy_label(struct task_struct *tsk)
46045 +{
46046 + tsk->signal->used_accept = 0;
46047 + tsk->acl_sp_role = 0;
46048 + tsk->acl_role_id = current->acl_role_id;
46049 + tsk->acl = current->acl;
46050 + tsk->role = current->role;
46051 + tsk->signal->curr_ip = current->signal->curr_ip;
46052 + tsk->signal->saved_ip = current->signal->saved_ip;
46053 + if (current->exec_file)
46054 + get_file(current->exec_file);
46055 + tsk->exec_file = current->exec_file;
46056 + tsk->is_writable = current->is_writable;
46057 + if (unlikely(current->signal->used_accept)) {
46058 + current->signal->curr_ip = 0;
46059 + current->signal->saved_ip = 0;
46060 + }
46061 +
46062 + return;
46063 +}
46064 +
46065 +static void
46066 +gr_set_proc_res(struct task_struct *task)
46067 +{
46068 + struct acl_subject_label *proc;
46069 + unsigned short i;
46070 +
46071 + proc = task->acl;
46072 +
46073 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
46074 + return;
46075 +
46076 + for (i = 0; i < RLIM_NLIMITS; i++) {
46077 + if (!(proc->resmask & (1 << i)))
46078 + continue;
46079 +
46080 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
46081 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
46082 + }
46083 +
46084 + return;
46085 +}
46086 +
46087 +extern int __gr_process_user_ban(struct user_struct *user);
46088 +
46089 +int
46090 +gr_check_user_change(int real, int effective, int fs)
46091 +{
46092 + unsigned int i;
46093 + __u16 num;
46094 + uid_t *uidlist;
46095 + int curuid;
46096 + int realok = 0;
46097 + int effectiveok = 0;
46098 + int fsok = 0;
46099 +
46100 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
46101 + struct user_struct *user;
46102 +
46103 + if (real == -1)
46104 + goto skipit;
46105 +
46106 + user = find_user(real);
46107 + if (user == NULL)
46108 + goto skipit;
46109 +
46110 + if (__gr_process_user_ban(user)) {
46111 + /* for find_user */
46112 + free_uid(user);
46113 + return 1;
46114 + }
46115 +
46116 + /* for find_user */
46117 + free_uid(user);
46118 +
46119 +skipit:
46120 +#endif
46121 +
46122 + if (unlikely(!(gr_status & GR_READY)))
46123 + return 0;
46124 +
46125 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46126 + gr_log_learn_id_change('u', real, effective, fs);
46127 +
46128 + num = current->acl->user_trans_num;
46129 + uidlist = current->acl->user_transitions;
46130 +
46131 + if (uidlist == NULL)
46132 + return 0;
46133 +
46134 + if (real == -1)
46135 + realok = 1;
46136 + if (effective == -1)
46137 + effectiveok = 1;
46138 + if (fs == -1)
46139 + fsok = 1;
46140 +
46141 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
46142 + for (i = 0; i < num; i++) {
46143 + curuid = (int)uidlist[i];
46144 + if (real == curuid)
46145 + realok = 1;
46146 + if (effective == curuid)
46147 + effectiveok = 1;
46148 + if (fs == curuid)
46149 + fsok = 1;
46150 + }
46151 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
46152 + for (i = 0; i < num; i++) {
46153 + curuid = (int)uidlist[i];
46154 + if (real == curuid)
46155 + break;
46156 + if (effective == curuid)
46157 + break;
46158 + if (fs == curuid)
46159 + break;
46160 + }
46161 + /* not in deny list */
46162 + if (i == num) {
46163 + realok = 1;
46164 + effectiveok = 1;
46165 + fsok = 1;
46166 + }
46167 + }
46168 +
46169 + if (realok && effectiveok && fsok)
46170 + return 0;
46171 + else {
46172 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46173 + return 1;
46174 + }
46175 +}
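+/* illustration with hypothetical policy values: with user_trans_type set to
+   GR_ID_ALLOW and user_transitions = { 0, 33 }, changing all three ids to
+   uid 33 returns 0 (allowed), while the same change under GR_ID_DENY
+   returns 1 and is logged; an id passed as -1 means "unchanged" and is
+   always acceptable, and a NULL transition list imposes no restriction */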
46176 +
46177 +int
46178 +gr_check_group_change(int real, int effective, int fs)
46179 +{
46180 + unsigned int i;
46181 + __u16 num;
46182 + gid_t *gidlist;
46183 + int curgid;
46184 + int realok = 0;
46185 + int effectiveok = 0;
46186 + int fsok = 0;
46187 +
46188 + if (unlikely(!(gr_status & GR_READY)))
46189 + return 0;
46190 +
46191 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46192 + gr_log_learn_id_change('g', real, effective, fs);
46193 +
46194 + num = current->acl->group_trans_num;
46195 + gidlist = current->acl->group_transitions;
46196 +
46197 + if (gidlist == NULL)
46198 + return 0;
46199 +
46200 + if (real == -1)
46201 + realok = 1;
46202 + if (effective == -1)
46203 + effectiveok = 1;
46204 + if (fs == -1)
46205 + fsok = 1;
46206 +
46207 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
46208 + for (i = 0; i < num; i++) {
46209 + curgid = (int)gidlist[i];
46210 + if (real == curgid)
46211 + realok = 1;
46212 + if (effective == curgid)
46213 + effectiveok = 1;
46214 + if (fs == curgid)
46215 + fsok = 1;
46216 + }
46217 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
46218 + for (i = 0; i < num; i++) {
46219 + curgid = (int)gidlist[i];
46220 + if (real == curgid)
46221 + break;
46222 + if (effective == curgid)
46223 + break;
46224 + if (fs == curgid)
46225 + break;
46226 + }
46227 + /* not in deny list */
46228 + if (i == num) {
46229 + realok = 1;
46230 + effectiveok = 1;
46231 + fsok = 1;
46232 + }
46233 + }
46234 +
46235 + if (realok && effectiveok && fsok)
46236 + return 0;
46237 + else {
46238 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46239 + return 1;
46240 + }
46241 +}
46242 +
46243 +void
46244 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
46245 +{
46246 + struct acl_role_label *role = task->role;
46247 + struct acl_subject_label *subj = NULL;
46248 + struct acl_object_label *obj;
46249 + struct file *filp;
46250 +
46251 + if (unlikely(!(gr_status & GR_READY)))
46252 + return;
46253 +
46254 + filp = task->exec_file;
46255 +
46256 + /* kernel process, we'll give them the kernel role */
46257 + if (unlikely(!filp)) {
46258 + task->role = kernel_role;
46259 + task->acl = kernel_role->root_label;
46260 + return;
46261 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
46262 + role = lookup_acl_role_label(task, uid, gid);
46263 +
46264 +	/* perform subject lookup in the possibly new role;
46265 +	   we can use this result below in the case where role == task->role
46266 +	 */
46267 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
46268 +
46269 +	/* if we changed uid/gid but ended up in the same role
46270 +	   and are using inheritance, don't lose the inherited subject:
46271 +	   if the current subject is other than what a normal lookup
46272 +	   would produce, we arrived at it via inheritance, so don't
46273 +	   lose that subject
46274 +	 */
46275 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
46276 + (subj == task->acl)))
46277 + task->acl = subj;
46278 +
46279 + task->role = role;
46280 +
46281 + task->is_writable = 0;
46282 +
46283 + /* ignore additional mmap checks for processes that are writable
46284 + by the default ACL */
46285 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46286 + if (unlikely(obj->mode & GR_WRITE))
46287 + task->is_writable = 1;
46288 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
46289 + if (unlikely(obj->mode & GR_WRITE))
46290 + task->is_writable = 1;
46291 +
46292 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46293 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46294 +#endif
46295 +
46296 + gr_set_proc_res(task);
46297 +
46298 + return;
46299 +}
46300 +
46301 +int
46302 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
46303 + const int unsafe_share)
46304 +{
46305 + struct task_struct *task = current;
46306 + struct acl_subject_label *newacl;
46307 + struct acl_object_label *obj;
46308 + __u32 retmode;
46309 +
46310 + if (unlikely(!(gr_status & GR_READY)))
46311 + return 0;
46312 +
46313 + newacl = chk_subj_label(dentry, mnt, task->role);
46314 +
46315 + task_lock(task);
46316 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
46317 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
46318 + !(task->role->roletype & GR_ROLE_GOD) &&
46319 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
46320 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
46321 + task_unlock(task);
46322 + if (unsafe_share)
46323 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
46324 + else
46325 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
46326 + return -EACCES;
46327 + }
46328 + task_unlock(task);
46329 +
46330 + obj = chk_obj_label(dentry, mnt, task->acl);
46331 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
46332 +
46333 + if (!(task->acl->mode & GR_INHERITLEARN) &&
46334 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
46335 + if (obj->nested)
46336 + task->acl = obj->nested;
46337 + else
46338 + task->acl = newacl;
46339 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
46340 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
46341 +
46342 + task->is_writable = 0;
46343 +
46344 + /* ignore additional mmap checks for processes that are writable
46345 + by the default ACL */
46346 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
46347 + if (unlikely(obj->mode & GR_WRITE))
46348 + task->is_writable = 1;
46349 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
46350 + if (unlikely(obj->mode & GR_WRITE))
46351 + task->is_writable = 1;
46352 +
46353 + gr_set_proc_res(task);
46354 +
46355 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46356 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
46357 +#endif
46358 + return 0;
46359 +}
46360 +
46361 +/* always called with valid inodev ptr */
46362 +static void
46363 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
46364 +{
46365 + struct acl_object_label *matchpo;
46366 + struct acl_subject_label *matchps;
46367 + struct acl_subject_label *subj;
46368 + struct acl_role_label *role;
46369 + unsigned int x;
46370 +
46371 + FOR_EACH_ROLE_START(role)
46372 + FOR_EACH_SUBJECT_START(role, subj, x)
46373 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
46374 + matchpo->mode |= GR_DELETED;
46375 + FOR_EACH_SUBJECT_END(subj,x)
46376 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
46377 + if (subj->inode == ino && subj->device == dev)
46378 + subj->mode |= GR_DELETED;
46379 + FOR_EACH_NESTED_SUBJECT_END(subj)
46380 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
46381 + matchps->mode |= GR_DELETED;
46382 + FOR_EACH_ROLE_END(role)
46383 +
46384 + inodev->nentry->deleted = 1;
46385 +
46386 + return;
46387 +}
46388 +
46389 +void
46390 +gr_handle_delete(const ino_t ino, const dev_t dev)
46391 +{
46392 + struct inodev_entry *inodev;
46393 +
46394 + if (unlikely(!(gr_status & GR_READY)))
46395 + return;
46396 +
46397 + write_lock(&gr_inode_lock);
46398 + inodev = lookup_inodev_entry(ino, dev);
46399 + if (inodev != NULL)
46400 + do_handle_delete(inodev, ino, dev);
46401 + write_unlock(&gr_inode_lock);
46402 +
46403 + return;
46404 +}
46405 +
46406 +static void
46407 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
46408 + const ino_t newinode, const dev_t newdevice,
46409 + struct acl_subject_label *subj)
46410 +{
46411 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
46412 + struct acl_object_label *match;
46413 +
46414 + match = subj->obj_hash[index];
46415 +
46416 + while (match && (match->inode != oldinode ||
46417 + match->device != olddevice ||
46418 + !(match->mode & GR_DELETED)))
46419 + match = match->next;
46420 +
46421 + if (match && (match->inode == oldinode)
46422 + && (match->device == olddevice)
46423 + && (match->mode & GR_DELETED)) {
46424 + if (match->prev == NULL) {
46425 + subj->obj_hash[index] = match->next;
46426 + if (match->next != NULL)
46427 + match->next->prev = NULL;
46428 + } else {
46429 + match->prev->next = match->next;
46430 + if (match->next != NULL)
46431 + match->next->prev = match->prev;
46432 + }
46433 + match->prev = NULL;
46434 + match->next = NULL;
46435 + match->inode = newinode;
46436 + match->device = newdevice;
46437 + match->mode &= ~GR_DELETED;
46438 +
46439 + insert_acl_obj_label(match, subj);
46440 + }
46441 +
46442 + return;
46443 +}
46444 +
46445 +static void
46446 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
46447 + const ino_t newinode, const dev_t newdevice,
46448 + struct acl_role_label *role)
46449 +{
46450 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
46451 + struct acl_subject_label *match;
46452 +
46453 + match = role->subj_hash[index];
46454 +
46455 + while (match && (match->inode != oldinode ||
46456 + match->device != olddevice ||
46457 + !(match->mode & GR_DELETED)))
46458 + match = match->next;
46459 +
46460 + if (match && (match->inode == oldinode)
46461 + && (match->device == olddevice)
46462 + && (match->mode & GR_DELETED)) {
46463 + if (match->prev == NULL) {
46464 + role->subj_hash[index] = match->next;
46465 + if (match->next != NULL)
46466 + match->next->prev = NULL;
46467 + } else {
46468 + match->prev->next = match->next;
46469 + if (match->next != NULL)
46470 + match->next->prev = match->prev;
46471 + }
46472 + match->prev = NULL;
46473 + match->next = NULL;
46474 + match->inode = newinode;
46475 + match->device = newdevice;
46476 + match->mode &= ~GR_DELETED;
46477 +
46478 + insert_acl_subj_label(match, role);
46479 + }
46480 +
46481 + return;
46482 +}
46483 +
46484 +static void
46485 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
46486 + const ino_t newinode, const dev_t newdevice)
46487 +{
46488 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
46489 + struct inodev_entry *match;
46490 +
46491 + match = inodev_set.i_hash[index];
46492 +
46493 + while (match && (match->nentry->inode != oldinode ||
46494 + match->nentry->device != olddevice || !match->nentry->deleted))
46495 + match = match->next;
46496 +
46497 + if (match && (match->nentry->inode == oldinode)
46498 + && (match->nentry->device == olddevice) &&
46499 + match->nentry->deleted) {
46500 + if (match->prev == NULL) {
46501 + inodev_set.i_hash[index] = match->next;
46502 + if (match->next != NULL)
46503 + match->next->prev = NULL;
46504 + } else {
46505 + match->prev->next = match->next;
46506 + if (match->next != NULL)
46507 + match->next->prev = match->prev;
46508 + }
46509 + match->prev = NULL;
46510 + match->next = NULL;
46511 + match->nentry->inode = newinode;
46512 + match->nentry->device = newdevice;
46513 + match->nentry->deleted = 0;
46514 +
46515 + insert_inodev_entry(match);
46516 + }
46517 +
46518 + return;
46519 +}
46520 +
46521 +static void
46522 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
46523 + const struct vfsmount *mnt)
46524 +{
46525 + struct acl_subject_label *subj;
46526 + struct acl_role_label *role;
46527 + unsigned int x;
46528 + ino_t inode = dentry->d_inode->i_ino;
46529 + dev_t dev = __get_dev(dentry);
46530 +
46531 + FOR_EACH_ROLE_START(role)
46532 + update_acl_subj_label(matchn->inode, matchn->device,
46533 + inode, dev, role);
46534 +
46535 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
46536 + if ((subj->inode == inode) && (subj->device == dev)) {
46537 + subj->inode = inode;
46538 + subj->device = dev;
46539 + }
46540 + FOR_EACH_NESTED_SUBJECT_END(subj)
46541 + FOR_EACH_SUBJECT_START(role, subj, x)
46542 + update_acl_obj_label(matchn->inode, matchn->device,
46543 + inode, dev, subj);
46544 + FOR_EACH_SUBJECT_END(subj,x)
46545 + FOR_EACH_ROLE_END(role)
46546 +
46547 + update_inodev_entry(matchn->inode, matchn->device, inode, dev);
46548 +
46549 + return;
46550 +}
46551 +
46552 +void
46553 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
46554 +{
46555 + struct name_entry *matchn;
46556 +
46557 + if (unlikely(!(gr_status & GR_READY)))
46558 + return;
46559 +
46560 + preempt_disable();
46561 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
46562 +
46563 + if (unlikely((unsigned long)matchn)) {
46564 + write_lock(&gr_inode_lock);
46565 + do_handle_create(matchn, dentry, mnt);
46566 + write_unlock(&gr_inode_lock);
46567 + }
46568 + preempt_enable();
46569 +
46570 + return;
46571 +}
46572 +
46573 +void
46574 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
46575 + struct dentry *old_dentry,
46576 + struct dentry *new_dentry,
46577 + struct vfsmount *mnt, const __u8 replace)
46578 +{
46579 + struct name_entry *matchn;
46580 + struct inodev_entry *inodev;
46581 + ino_t oldinode = old_dentry->d_inode->i_ino;
46582 + dev_t olddev = __get_dev(old_dentry);
46583 +
46584 + /* vfs_rename swaps the name and parent link for old_dentry and
46585 + new_dentry
46586 + at this point, old_dentry has the new name, parent link, and inode
46587 + for the renamed file
46588 + if a file is being replaced by a rename, new_dentry has the inode
46589 + and name for the replaced file
46590 + */
46591 +
46592 + if (unlikely(!(gr_status & GR_READY)))
46593 + return;
46594 +
46595 + preempt_disable();
46596 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
46597 +
46598 + /* we wouldn't have to check d_inode if it weren't for
46599 + NFS silly-renaming
46600 + */
46601 +
46602 + write_lock(&gr_inode_lock);
46603 + if (unlikely(replace && new_dentry->d_inode)) {
46604 + ino_t newinode = new_dentry->d_inode->i_ino;
46605 + dev_t newdev = __get_dev(new_dentry);
46606 + inodev = lookup_inodev_entry(newinode, newdev);
46607 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
46608 + do_handle_delete(inodev, newinode, newdev);
46609 + }
46610 +
46611 + inodev = lookup_inodev_entry(oldinode, olddev);
46612 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
46613 + do_handle_delete(inodev, oldinode, olddev);
46614 +
46615 + if (unlikely((unsigned long)matchn))
46616 + do_handle_create(matchn, old_dentry, mnt);
46617 +
46618 + write_unlock(&gr_inode_lock);
46619 + preempt_enable();
46620 +
46621 + return;
46622 +}
46623 +
46624 +static int
46625 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
46626 + unsigned char **sum)
46627 +{
46628 + struct acl_role_label *r;
46629 + struct role_allowed_ip *ipp;
46630 + struct role_transition *trans;
46631 + unsigned int i;
46632 + int found = 0;
46633 + u32 curr_ip = current->signal->curr_ip;
46634 +
46635 + current->signal->saved_ip = curr_ip;
46636 +
46637 + /* check transition table */
46638 +
46639 + for (trans = current->role->transitions; trans; trans = trans->next) {
46640 + if (!strcmp(rolename, trans->rolename)) {
46641 + found = 1;
46642 + break;
46643 + }
46644 + }
46645 +
46646 + if (!found)
46647 + return 0;
46648 +
46649 + /* handle special roles that do not require authentication
46650 + and check ip */
46651 +
46652 + FOR_EACH_ROLE_START(r)
46653 + if (!strcmp(rolename, r->rolename) &&
46654 + (r->roletype & GR_ROLE_SPECIAL)) {
46655 + found = 0;
46656 + if (r->allowed_ips != NULL) {
46657 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
46658 + if ((ntohl(curr_ip) & ipp->netmask) ==
46659 + (ntohl(ipp->addr) & ipp->netmask))
46660 + found = 1;
46661 + }
46662 + } else
46663 + found = 2;
46664 + if (!found)
46665 + return 0;
46666 +
46667 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
46668 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
46669 + *salt = NULL;
46670 + *sum = NULL;
46671 + return 1;
46672 + }
46673 + }
46674 + FOR_EACH_ROLE_END(r)
46675 +
46676 + for (i = 0; i < num_sprole_pws; i++) {
46677 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
46678 + *salt = acl_special_roles[i]->salt;
46679 + *sum = acl_special_roles[i]->sum;
46680 + return 1;
46681 + }
46682 + }
46683 +
46684 + return 0;
46685 +}
46686 +
46687 +static void
46688 +assign_special_role(char *rolename)
46689 +{
46690 + struct acl_object_label *obj;
46691 + struct acl_role_label *r;
46692 + struct acl_role_label *assigned = NULL;
46693 + struct task_struct *tsk;
46694 + struct file *filp;
46695 +
46696 + FOR_EACH_ROLE_START(r)
46697 + if (!strcmp(rolename, r->rolename) &&
46698 + (r->roletype & GR_ROLE_SPECIAL)) {
46699 + assigned = r;
46700 + break;
46701 + }
46702 + FOR_EACH_ROLE_END(r)
46703 +
46704 + if (!assigned)
46705 + return;
46706 +
46707 + read_lock(&tasklist_lock);
46708 + read_lock(&grsec_exec_file_lock);
46709 +
46710 + tsk = current->real_parent;
46711 + if (tsk == NULL)
46712 + goto out_unlock;
46713 +
46714 + filp = tsk->exec_file;
46715 + if (filp == NULL)
46716 + goto out_unlock;
46717 +
46718 + tsk->is_writable = 0;
46719 +
46720 + tsk->acl_sp_role = 1;
46721 + tsk->acl_role_id = ++acl_sp_role_value;
46722 + tsk->role = assigned;
46723 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
46724 +
46725 + /* ignore additional mmap checks for processes that are writable
46726 + by the default ACL */
46727 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
46728 + if (unlikely(obj->mode & GR_WRITE))
46729 + tsk->is_writable = 1;
46730 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
46731 + if (unlikely(obj->mode & GR_WRITE))
46732 + tsk->is_writable = 1;
46733 +
46734 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46735 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
46736 +#endif
46737 +
46738 +out_unlock:
46739 + read_unlock(&grsec_exec_file_lock);
46740 + read_unlock(&tasklist_lock);
46741 + return;
46742 +}
46743 +
46744 +int gr_check_secure_terminal(struct task_struct *task)
46745 +{
46746 + struct task_struct *p, *p2, *p3;
46747 + struct files_struct *files;
46748 + struct fdtable *fdt;
46749 + struct file *our_file = NULL, *file;
46750 + int i;
46751 +
46752 + if (task->signal->tty == NULL)
46753 + return 1;
46754 +
46755 + files = get_files_struct(task);
46756 + if (files != NULL) {
46757 + rcu_read_lock();
46758 + fdt = files_fdtable(files);
46759 + for (i=0; i < fdt->max_fds; i++) {
46760 + file = fcheck_files(files, i);
46761 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
46762 + get_file(file);
46763 + our_file = file;
46764 + }
46765 + }
46766 + rcu_read_unlock();
46767 + put_files_struct(files);
46768 + }
46769 +
46770 + if (our_file == NULL)
46771 + return 1;
46772 +
46773 + read_lock(&tasklist_lock);
46774 + do_each_thread(p2, p) {
46775 + files = get_files_struct(p);
46776 + if (files == NULL ||
46777 + (p->signal && p->signal->tty == task->signal->tty)) {
46778 + if (files != NULL)
46779 + put_files_struct(files);
46780 + continue;
46781 + }
46782 + rcu_read_lock();
46783 + fdt = files_fdtable(files);
46784 + for (i=0; i < fdt->max_fds; i++) {
46785 + file = fcheck_files(files, i);
46786 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
46787 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
46788 + p3 = task;
46789 + while (p3->pid > 0) {
46790 + if (p3 == p)
46791 + break;
46792 + p3 = p3->real_parent;
46793 + }
46794 + if (p3 == p)
46795 + break;
46796 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
46797 + gr_handle_alertkill(p);
46798 + rcu_read_unlock();
46799 + put_files_struct(files);
46800 + read_unlock(&tasklist_lock);
46801 + fput(our_file);
46802 + return 0;
46803 + }
46804 + }
46805 + rcu_read_unlock();
46806 + put_files_struct(files);
46807 + } while_each_thread(p2, p);
46808 + read_unlock(&tasklist_lock);
46809 +
46810 + fput(our_file);
46811 + return 1;
46812 +}
46813 +
46814 +ssize_t
46815 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
46816 +{
46817 + struct gr_arg_wrapper uwrap;
46818 + unsigned char *sprole_salt = NULL;
46819 + unsigned char *sprole_sum = NULL;
46820 + int error = sizeof (struct gr_arg_wrapper);
46821 + int error2 = 0;
46822 +
46823 + mutex_lock(&gr_dev_mutex);
46824 +
46825 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
46826 + error = -EPERM;
46827 + goto out;
46828 + }
46829 +
46830 + if (count != sizeof (struct gr_arg_wrapper)) {
46831 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
46832 + error = -EINVAL;
46833 + goto out;
46834 + }
46835 +
46836 +
46837 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
46838 + gr_auth_expires = 0;
46839 + gr_auth_attempts = 0;
46840 + }
46841 +
46842 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
46843 + error = -EFAULT;
46844 + goto out;
46845 + }
46846 +
46847 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
46848 + error = -EINVAL;
46849 + goto out;
46850 + }
46851 +
46852 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
46853 + error = -EFAULT;
46854 + goto out;
46855 + }
46856 +
46857 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
46858 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
46859 + time_after(gr_auth_expires, get_seconds())) {
46860 + error = -EBUSY;
46861 + goto out;
46862 + }
46863 +
46864 + /* if non-root trying to do anything other than use a special role,
46865 + do not attempt authentication, do not count towards authentication
46866 + locking
46867 + */
46868 +
46869 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
46870 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
46871 + current_uid()) {
46872 + error = -EPERM;
46873 + goto out;
46874 + }
46875 +
46876 + /* ensure pw and special role name are null terminated */
46877 +
46878 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
46879 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
46880 +
46881 + /* Okay.
46882 +	 * We have enough of the argument structure (we have yet
46883 +	 * to copy_from_user the tables themselves). Copy the tables
46884 + * only if we need them, i.e. for loading operations. */
46885 +
46886 + switch (gr_usermode->mode) {
46887 + case GR_STATUS:
46888 + if (gr_status & GR_READY) {
46889 + error = 1;
46890 + if (!gr_check_secure_terminal(current))
46891 + error = 3;
46892 + } else
46893 + error = 2;
46894 + goto out;
46895 + case GR_SHUTDOWN:
46896 + if ((gr_status & GR_READY)
46897 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46898 + pax_open_kernel();
46899 + gr_status &= ~GR_READY;
46900 + pax_close_kernel();
46901 +
46902 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
46903 + free_variables();
46904 + memset(gr_usermode, 0, sizeof (struct gr_arg));
46905 + memset(gr_system_salt, 0, GR_SALT_LEN);
46906 + memset(gr_system_sum, 0, GR_SHA_LEN);
46907 + } else if (gr_status & GR_READY) {
46908 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
46909 + error = -EPERM;
46910 + } else {
46911 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
46912 + error = -EAGAIN;
46913 + }
46914 + break;
46915 + case GR_ENABLE:
46916 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
46917 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
46918 + else {
46919 + if (gr_status & GR_READY)
46920 + error = -EAGAIN;
46921 + else
46922 + error = error2;
46923 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
46924 + }
46925 + break;
46926 + case GR_RELOAD:
46927 + if (!(gr_status & GR_READY)) {
46928 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
46929 + error = -EAGAIN;
46930 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46931 + lock_kernel();
46932 +
46933 + pax_open_kernel();
46934 + gr_status &= ~GR_READY;
46935 + pax_close_kernel();
46936 +
46937 + free_variables();
46938 + if (!(error2 = gracl_init(gr_usermode))) {
46939 + unlock_kernel();
46940 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
46941 + } else {
46942 + unlock_kernel();
46943 + error = error2;
46944 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
46945 + }
46946 + } else {
46947 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
46948 + error = -EPERM;
46949 + }
46950 + break;
46951 + case GR_SEGVMOD:
46952 + if (unlikely(!(gr_status & GR_READY))) {
46953 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
46954 + error = -EAGAIN;
46955 + break;
46956 + }
46957 +
46958 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
46959 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
46960 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
46961 + struct acl_subject_label *segvacl;
46962 + segvacl =
46963 + lookup_acl_subj_label(gr_usermode->segv_inode,
46964 + gr_usermode->segv_device,
46965 + current->role);
46966 + if (segvacl) {
46967 + segvacl->crashes = 0;
46968 + segvacl->expires = 0;
46969 + }
46970 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
46971 + gr_remove_uid(gr_usermode->segv_uid);
46972 + }
46973 + } else {
46974 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
46975 + error = -EPERM;
46976 + }
46977 + break;
46978 + case GR_SPROLE:
46979 + case GR_SPROLEPAM:
46980 + if (unlikely(!(gr_status & GR_READY))) {
46981 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
46982 + error = -EAGAIN;
46983 + break;
46984 + }
46985 +
46986 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
46987 + current->role->expires = 0;
46988 + current->role->auth_attempts = 0;
46989 + }
46990 +
46991 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
46992 + time_after(current->role->expires, get_seconds())) {
46993 + error = -EBUSY;
46994 + goto out;
46995 + }
46996 +
46997 + if (lookup_special_role_auth
46998 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
46999 + && ((!sprole_salt && !sprole_sum)
47000 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
47001 + char *p = "";
47002 + assign_special_role(gr_usermode->sp_role);
47003 + read_lock(&tasklist_lock);
47004 + if (current->real_parent)
47005 + p = current->real_parent->role->rolename;
47006 + read_unlock(&tasklist_lock);
47007 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
47008 + p, acl_sp_role_value);
47009 + } else {
47010 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
47011 + error = -EPERM;
47012 + if(!(current->role->auth_attempts++))
47013 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47014 +
47015 + goto out;
47016 + }
47017 + break;
47018 + case GR_UNSPROLE:
47019 + if (unlikely(!(gr_status & GR_READY))) {
47020 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
47021 + error = -EAGAIN;
47022 + break;
47023 + }
47024 +
47025 + if (current->role->roletype & GR_ROLE_SPECIAL) {
47026 + char *p = "";
47027 + int i = 0;
47028 +
47029 + read_lock(&tasklist_lock);
47030 + if (current->real_parent) {
47031 + p = current->real_parent->role->rolename;
47032 + i = current->real_parent->acl_role_id;
47033 + }
47034 + read_unlock(&tasklist_lock);
47035 +
47036 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
47037 + gr_set_acls(1);
47038 + } else {
47039 + error = -EPERM;
47040 + goto out;
47041 + }
47042 + break;
47043 + default:
47044 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
47045 + error = -EINVAL;
47046 + break;
47047 + }
47048 +
47049 + if (error != -EPERM)
47050 + goto out;
47051 +
47052 + if(!(gr_auth_attempts++))
47053 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47054 +
47055 + out:
47056 + mutex_unlock(&gr_dev_mutex);
47057 + return error;
47058 +}
47059 +
47060 +/* must be called with
47061 + rcu_read_lock();
47062 + read_lock(&tasklist_lock);
47063 + read_lock(&grsec_exec_file_lock);
47064 +*/
47065 +int gr_apply_subject_to_task(struct task_struct *task)
47066 +{
47067 + struct acl_object_label *obj;
47068 + char *tmpname;
47069 + struct acl_subject_label *tmpsubj;
47070 + struct file *filp;
47071 + struct name_entry *nmatch;
47072 +
47073 + filp = task->exec_file;
47074 + if (filp == NULL)
47075 + return 0;
47076 +
47077 + /* the following is to apply the correct subject
47078 + on binaries running when the RBAC system
47079 + is enabled, when the binaries have been
47080 + replaced or deleted since their execution
47081 + -----
47082 + when the RBAC system starts, the inode/dev
47083 + from exec_file will be one the RBAC system
47084 + is unaware of. It only knows the inode/dev
47085 + of the present file on disk, or the absence
47086 + of it.
47087 + */
47088 + preempt_disable();
47089 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
47090 +
47091 + nmatch = lookup_name_entry(tmpname);
47092 + preempt_enable();
47093 + tmpsubj = NULL;
47094 + if (nmatch) {
47095 + if (nmatch->deleted)
47096 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
47097 + else
47098 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
47099 + if (tmpsubj != NULL)
47100 + task->acl = tmpsubj;
47101 + }
47102 + if (tmpsubj == NULL)
47103 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
47104 + task->role);
47105 + if (task->acl) {
47106 + struct acl_subject_label *curr;
47107 + curr = task->acl;
47108 +
47109 + task->is_writable = 0;
47110 + /* ignore additional mmap checks for processes that are writable
47111 + by the default ACL */
47112 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47113 + if (unlikely(obj->mode & GR_WRITE))
47114 + task->is_writable = 1;
47115 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47116 + if (unlikely(obj->mode & GR_WRITE))
47117 + task->is_writable = 1;
47118 +
47119 + gr_set_proc_res(task);
47120 +
47121 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47122 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47123 +#endif
47124 + } else {
47125 + return 1;
47126 + }
47127 +
47128 + return 0;
47129 +}
47130 +
47131 +int
47132 +gr_set_acls(const int type)
47133 +{
47134 + struct task_struct *task, *task2;
47135 + struct acl_role_label *role = current->role;
47136 + __u16 acl_role_id = current->acl_role_id;
47137 + const struct cred *cred;
47138 + int ret;
47139 +
47140 + rcu_read_lock();
47141 + read_lock(&tasklist_lock);
47142 + read_lock(&grsec_exec_file_lock);
47143 + do_each_thread(task2, task) {
47144 + /* check to see if we're called from the exit handler,
47145 + if so, only replace ACLs that have inherited the admin
47146 + ACL */
47147 +
47148 + if (type && (task->role != role ||
47149 + task->acl_role_id != acl_role_id))
47150 + continue;
47151 +
47152 + task->acl_role_id = 0;
47153 + task->acl_sp_role = 0;
47154 +
47155 + if (task->exec_file) {
47156 + cred = __task_cred(task);
47157 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
47158 +
47159 + ret = gr_apply_subject_to_task(task);
47160 + if (ret) {
47161 + read_unlock(&grsec_exec_file_lock);
47162 + read_unlock(&tasklist_lock);
47163 + rcu_read_unlock();
47164 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
47165 + return ret;
47166 + }
47167 + } else {
47168 + // it's a kernel process
47169 + task->role = kernel_role;
47170 + task->acl = kernel_role->root_label;
47171 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
47172 + task->acl->mode &= ~GR_PROCFIND;
47173 +#endif
47174 + }
47175 + } while_each_thread(task2, task);
47176 + read_unlock(&grsec_exec_file_lock);
47177 + read_unlock(&tasklist_lock);
47178 + rcu_read_unlock();
47179 +
47180 + return 0;
47181 +}
47182 +
47183 +void
47184 +gr_learn_resource(const struct task_struct *task,
47185 + const int res, const unsigned long wanted, const int gt)
47186 +{
47187 + struct acl_subject_label *acl;
47188 + const struct cred *cred;
47189 +
47190 + if (unlikely((gr_status & GR_READY) &&
47191 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
47192 + goto skip_reslog;
47193 +
47194 +#ifdef CONFIG_GRKERNSEC_RESLOG
47195 + gr_log_resource(task, res, wanted, gt);
47196 +#endif
47197 + skip_reslog:
47198 +
47199 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
47200 + return;
47201 +
47202 + acl = task->acl;
47203 +
47204 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
47205 + !(acl->resmask & (1 << (unsigned short) res))))
47206 + return;
47207 +
47208 + if (wanted >= acl->res[res].rlim_cur) {
47209 + unsigned long res_add;
47210 +
47211 + res_add = wanted;
47212 + switch (res) {
47213 + case RLIMIT_CPU:
47214 + res_add += GR_RLIM_CPU_BUMP;
47215 + break;
47216 + case RLIMIT_FSIZE:
47217 + res_add += GR_RLIM_FSIZE_BUMP;
47218 + break;
47219 + case RLIMIT_DATA:
47220 + res_add += GR_RLIM_DATA_BUMP;
47221 + break;
47222 + case RLIMIT_STACK:
47223 + res_add += GR_RLIM_STACK_BUMP;
47224 + break;
47225 + case RLIMIT_CORE:
47226 + res_add += GR_RLIM_CORE_BUMP;
47227 + break;
47228 + case RLIMIT_RSS:
47229 + res_add += GR_RLIM_RSS_BUMP;
47230 + break;
47231 + case RLIMIT_NPROC:
47232 + res_add += GR_RLIM_NPROC_BUMP;
47233 + break;
47234 + case RLIMIT_NOFILE:
47235 + res_add += GR_RLIM_NOFILE_BUMP;
47236 + break;
47237 + case RLIMIT_MEMLOCK:
47238 + res_add += GR_RLIM_MEMLOCK_BUMP;
47239 + break;
47240 + case RLIMIT_AS:
47241 + res_add += GR_RLIM_AS_BUMP;
47242 + break;
47243 + case RLIMIT_LOCKS:
47244 + res_add += GR_RLIM_LOCKS_BUMP;
47245 + break;
47246 + case RLIMIT_SIGPENDING:
47247 + res_add += GR_RLIM_SIGPENDING_BUMP;
47248 + break;
47249 + case RLIMIT_MSGQUEUE:
47250 + res_add += GR_RLIM_MSGQUEUE_BUMP;
47251 + break;
47252 + case RLIMIT_NICE:
47253 + res_add += GR_RLIM_NICE_BUMP;
47254 + break;
47255 + case RLIMIT_RTPRIO:
47256 + res_add += GR_RLIM_RTPRIO_BUMP;
47257 + break;
47258 + case RLIMIT_RTTIME:
47259 + res_add += GR_RLIM_RTTIME_BUMP;
47260 + break;
47261 + }
47262 +
47263 + acl->res[res].rlim_cur = res_add;
47264 +
47265 + if (wanted > acl->res[res].rlim_max)
47266 + acl->res[res].rlim_max = res_add;
47267 +
47268 + /* only log the subject filename, since resource logging is supported for
47269 + single-subject learning only */
47270 + rcu_read_lock();
47271 + cred = __task_cred(task);
47272 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47273 + task->role->roletype, cred->uid, cred->gid, acl->filename,
47274 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
47275 + "", (unsigned long) res, &task->signal->saved_ip);
47276 + rcu_read_unlock();
47277 + }
47278 +
47279 + return;
47280 +}
47281 +
47282 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
47283 +void
47284 +pax_set_initial_flags(struct linux_binprm *bprm)
47285 +{
47286 + struct task_struct *task = current;
47287 + struct acl_subject_label *proc;
47288 + unsigned long flags;
47289 +
47290 + if (unlikely(!(gr_status & GR_READY)))
47291 + return;
47292 +
47293 + flags = pax_get_flags(task);
47294 +
47295 + proc = task->acl;
47296 +
47297 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
47298 + flags &= ~MF_PAX_PAGEEXEC;
47299 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
47300 + flags &= ~MF_PAX_SEGMEXEC;
47301 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
47302 + flags &= ~MF_PAX_RANDMMAP;
47303 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
47304 + flags &= ~MF_PAX_EMUTRAMP;
47305 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
47306 + flags &= ~MF_PAX_MPROTECT;
47307 +
47308 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
47309 + flags |= MF_PAX_PAGEEXEC;
47310 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
47311 + flags |= MF_PAX_SEGMEXEC;
47312 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
47313 + flags |= MF_PAX_RANDMMAP;
47314 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
47315 + flags |= MF_PAX_EMUTRAMP;
47316 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
47317 + flags |= MF_PAX_MPROTECT;
47318 +
47319 + pax_set_flags(task, flags);
47320 +
47321 + return;
47322 +}
47323 +#endif
47324 +
47325 +#ifdef CONFIG_SYSCTL
47326 +/* Eric Biederman likes breaking userland ABI and every inode-based security
47327 + system to save 35kb of memory */
47328 +
47329 +/* we modify the passed in filename, but adjust it back before returning */
47330 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
47331 +{
47332 + struct name_entry *nmatch;
47333 + char *p, *lastp = NULL;
47334 + struct acl_object_label *obj = NULL, *tmp;
47335 + struct acl_subject_label *tmpsubj;
47336 + char c = '\0';
47337 +
47338 + read_lock(&gr_inode_lock);
47339 +
47340 + p = name + len - 1;
47341 + do {
47342 + nmatch = lookup_name_entry(name);
47343 + if (lastp != NULL)
47344 + *lastp = c;
47345 +
47346 + if (nmatch == NULL)
47347 + goto next_component;
47348 + tmpsubj = current->acl;
47349 + do {
47350 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
47351 + if (obj != NULL) {
47352 + tmp = obj->globbed;
47353 + while (tmp) {
47354 + if (!glob_match(tmp->filename, name)) {
47355 + obj = tmp;
47356 + goto found_obj;
47357 + }
47358 + tmp = tmp->next;
47359 + }
47360 + goto found_obj;
47361 + }
47362 + } while ((tmpsubj = tmpsubj->parent_subject));
47363 +next_component:
47364 + /* end case */
47365 + if (p == name)
47366 + break;
47367 +
47368 + while (*p != '/')
47369 + p--;
47370 + if (p == name)
47371 + lastp = p + 1;
47372 + else {
47373 + lastp = p;
47374 + p--;
47375 + }
47376 + c = *lastp;
47377 + *lastp = '\0';
47378 + } while (1);
47379 +found_obj:
47380 + read_unlock(&gr_inode_lock);
47381 + /* obj returned will always be non-null */
47382 + return obj;
47383 +}
47384 +
47385 +/* returns 0 when allowing, non-zero on error
47386 + op of 0 is used for readdir, so we don't log the names of hidden files
47387 +*/
47388 +__u32
47389 +gr_handle_sysctl(const struct ctl_table *table, const int op)
47390 +{
47391 + ctl_table *tmp;
47392 + const char *proc_sys = "/proc/sys";
47393 + char *path;
47394 + struct acl_object_label *obj;
47395 + unsigned short len = 0, pos = 0, depth = 0, i;
47396 + __u32 err = 0;
47397 + __u32 mode = 0;
47398 +
47399 + if (unlikely(!(gr_status & GR_READY)))
47400 + return 0;
47401 +
47402 + /* for now, ignore operations on non-sysctl entries if it's not a
47403 + readdir*/
47404 + if (table->child != NULL && op != 0)
47405 + return 0;
47406 +
47407 + mode |= GR_FIND;
47408 + /* it's only a read if it's an entry, read on dirs is for readdir */
47409 + if (op & MAY_READ)
47410 + mode |= GR_READ;
47411 + if (op & MAY_WRITE)
47412 + mode |= GR_WRITE;
47413 +
47414 + preempt_disable();
47415 +
47416 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
47417 +
47418 + /* it's only a read/write if it's an actual entry, not a dir
47419 + (which are opened for readdir)
47420 + */
47421 +
47422 + /* convert the requested sysctl entry into a pathname */
47423 +
47424 + for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47425 + len += strlen(tmp->procname);
47426 + len++;
47427 + depth++;
47428 + }
47429 +
47430 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
47431 + /* deny */
47432 + goto out;
47433 + }
47434 +
47435 + memset(path, 0, PAGE_SIZE);
47436 +
47437 + memcpy(path, proc_sys, strlen(proc_sys));
47438 +
47439 + pos += strlen(proc_sys);
47440 +
47441 + for (; depth > 0; depth--) {
47442 + path[pos] = '/';
47443 + pos++;
47444 + for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
47445 + if (depth == i) {
47446 + memcpy(path + pos, tmp->procname,
47447 + strlen(tmp->procname));
47448 + pos += strlen(tmp->procname);
47449 + }
47450 + i++;
47451 + }
47452 + }
47453 +
47454 + obj = gr_lookup_by_name(path, pos);
47455 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
47456 +
47457 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
47458 + ((err & mode) != mode))) {
47459 + __u32 new_mode = mode;
47460 +
47461 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
47462 +
47463 + err = 0;
47464 + gr_log_learn_sysctl(path, new_mode);
47465 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
47466 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
47467 + err = -ENOENT;
47468 + } else if (!(err & GR_FIND)) {
47469 + err = -ENOENT;
47470 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
47471 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
47472 + path, (mode & GR_READ) ? " reading" : "",
47473 + (mode & GR_WRITE) ? " writing" : "");
47474 + err = -EACCES;
47475 + } else if ((err & mode) != mode) {
47476 + err = -EACCES;
47477 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
47478 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
47479 + path, (mode & GR_READ) ? " reading" : "",
47480 + (mode & GR_WRITE) ? " writing" : "");
47481 + err = 0;
47482 + } else
47483 + err = 0;
47484 +
47485 + out:
47486 + preempt_enable();
47487 +
47488 + return err;
47489 +}
47490 +#endif
47491 +
47492 +int
47493 +gr_handle_proc_ptrace(struct task_struct *task)
47494 +{
47495 + struct file *filp;
47496 + struct task_struct *tmp = task;
47497 + struct task_struct *curtemp = current;
47498 + __u32 retmode;
47499 +
47500 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47501 + if (unlikely(!(gr_status & GR_READY)))
47502 + return 0;
47503 +#endif
47504 +
47505 + read_lock(&tasklist_lock);
47506 + read_lock(&grsec_exec_file_lock);
47507 + filp = task->exec_file;
47508 +
47509 + while (tmp->pid > 0) {
47510 + if (tmp == curtemp)
47511 + break;
47512 + tmp = tmp->real_parent;
47513 + }
47514 +
47515 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47516 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
47517 + read_unlock(&grsec_exec_file_lock);
47518 + read_unlock(&tasklist_lock);
47519 + return 1;
47520 + }
47521 +
47522 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47523 + if (!(gr_status & GR_READY)) {
47524 + read_unlock(&grsec_exec_file_lock);
47525 + read_unlock(&tasklist_lock);
47526 + return 0;
47527 + }
47528 +#endif
47529 +
47530 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
47531 + read_unlock(&grsec_exec_file_lock);
47532 + read_unlock(&tasklist_lock);
47533 +
47534 + if (retmode & GR_NOPTRACE)
47535 + return 1;
47536 +
47537 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
47538 + && (current->acl != task->acl || (current->acl != current->role->root_label
47539 + && current->pid != task->pid)))
47540 + return 1;
47541 +
47542 + return 0;
47543 +}
47544 +
47545 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
47546 +{
47547 + if (unlikely(!(gr_status & GR_READY)))
47548 + return;
47549 +
47550 + if (!(current->role->roletype & GR_ROLE_GOD))
47551 + return;
47552 +
47553 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
47554 + p->role->rolename, gr_task_roletype_to_char(p),
47555 + p->acl->filename);
47556 +}
47557 +
47558 +int
47559 +gr_handle_ptrace(struct task_struct *task, const long request)
47560 +{
47561 + struct task_struct *tmp = task;
47562 + struct task_struct *curtemp = current;
47563 + __u32 retmode;
47564 +
47565 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
47566 + if (unlikely(!(gr_status & GR_READY)))
47567 + return 0;
47568 +#endif
47569 +
47570 + read_lock(&tasklist_lock);
47571 + while (tmp->pid > 0) {
47572 + if (tmp == curtemp)
47573 + break;
47574 + tmp = tmp->real_parent;
47575 + }
47576 +
47577 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
47578 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
47579 + read_unlock(&tasklist_lock);
47580 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47581 + return 1;
47582 + }
47583 + read_unlock(&tasklist_lock);
47584 +
47585 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
47586 + if (!(gr_status & GR_READY))
47587 + return 0;
47588 +#endif
47589 +
47590 + read_lock(&grsec_exec_file_lock);
47591 + if (unlikely(!task->exec_file)) {
47592 + read_unlock(&grsec_exec_file_lock);
47593 + return 0;
47594 + }
47595 +
47596 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
47597 + read_unlock(&grsec_exec_file_lock);
47598 +
47599 + if (retmode & GR_NOPTRACE) {
47600 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47601 + return 1;
47602 + }
47603 +
47604 + if (retmode & GR_PTRACERD) {
47605 + switch (request) {
47606 + case PTRACE_POKETEXT:
47607 + case PTRACE_POKEDATA:
47608 + case PTRACE_POKEUSR:
47609 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
47610 + case PTRACE_SETREGS:
47611 + case PTRACE_SETFPREGS:
47612 +#endif
47613 +#ifdef CONFIG_X86
47614 + case PTRACE_SETFPXREGS:
47615 +#endif
47616 +#ifdef CONFIG_ALTIVEC
47617 + case PTRACE_SETVRREGS:
47618 +#endif
47619 + return 1;
47620 + default:
47621 + return 0;
47622 + }
47623 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
47624 + !(current->role->roletype & GR_ROLE_GOD) &&
47625 + (current->acl != task->acl)) {
47626 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
47627 + return 1;
47628 + }
47629 +
47630 + return 0;
47631 +}
47632 +
47633 +static int is_writable_mmap(const struct file *filp)
47634 +{
47635 + struct task_struct *task = current;
47636 + struct acl_object_label *obj, *obj2;
47637 +
47638 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
47639 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
47640 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47641 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
47642 + task->role->root_label);
47643 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
47644 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
47645 + return 1;
47646 + }
47647 + }
47648 + return 0;
47649 +}
47650 +
47651 +int
47652 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
47653 +{
47654 + __u32 mode;
47655 +
47656 + if (unlikely(!file || !(prot & PROT_EXEC)))
47657 + return 1;
47658 +
47659 + if (is_writable_mmap(file))
47660 + return 0;
47661 +
47662 + mode =
47663 + gr_search_file(file->f_path.dentry,
47664 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47665 + file->f_path.mnt);
47666 +
47667 + if (!gr_tpe_allow(file))
47668 + return 0;
47669 +
47670 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47671 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47672 + return 0;
47673 + } else if (unlikely(!(mode & GR_EXEC))) {
47674 + return 0;
47675 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47676 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47677 + return 1;
47678 + }
47679 +
47680 + return 1;
47681 +}
47682 +
47683 +int
47684 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47685 +{
47686 + __u32 mode;
47687 +
47688 + if (unlikely(!file || !(prot & PROT_EXEC)))
47689 + return 1;
47690 +
47691 + if (is_writable_mmap(file))
47692 + return 0;
47693 +
47694 + mode =
47695 + gr_search_file(file->f_path.dentry,
47696 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
47697 + file->f_path.mnt);
47698 +
47699 + if (!gr_tpe_allow(file))
47700 + return 0;
47701 +
47702 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
47703 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47704 + return 0;
47705 + } else if (unlikely(!(mode & GR_EXEC))) {
47706 + return 0;
47707 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
47708 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
47709 + return 1;
47710 + }
47711 +
47712 + return 1;
47713 +}
47714 +
47715 +void
47716 +gr_acl_handle_psacct(struct task_struct *task, const long code)
47717 +{
47718 + unsigned long runtime;
47719 + unsigned long cputime;
47720 + unsigned int wday, cday;
47721 + __u8 whr, chr;
47722 + __u8 wmin, cmin;
47723 + __u8 wsec, csec;
47724 + struct timespec timeval;
47725 +
47726 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
47727 + !(task->acl->mode & GR_PROCACCT)))
47728 + return;
47729 +
47730 + do_posix_clock_monotonic_gettime(&timeval);
47731 + runtime = timeval.tv_sec - task->start_time.tv_sec;
47732 + wday = runtime / (3600 * 24);
47733 + runtime -= wday * (3600 * 24);
47734 + whr = runtime / 3600;
47735 + runtime -= whr * 3600;
47736 + wmin = runtime / 60;
47737 + runtime -= wmin * 60;
47738 + wsec = runtime;
47739 +
47740 + cputime = (task->utime + task->stime) / HZ;
47741 + cday = cputime / (3600 * 24);
47742 + cputime -= cday * (3600 * 24);
47743 + chr = cputime / 3600;
47744 + cputime -= chr * 3600;
47745 + cmin = cputime / 60;
47746 + cputime -= cmin * 60;
47747 + csec = cputime;
47748 +
47749 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
47750 +
47751 + return;
47752 +}
47753 +
47754 +void gr_set_kernel_label(struct task_struct *task)
47755 +{
47756 + if (gr_status & GR_READY) {
47757 + task->role = kernel_role;
47758 + task->acl = kernel_role->root_label;
47759 + }
47760 + return;
47761 +}
47762 +
47763 +#ifdef CONFIG_TASKSTATS
47764 +int gr_is_taskstats_denied(int pid)
47765 +{
47766 + struct task_struct *task;
47767 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47768 + const struct cred *cred;
47769 +#endif
47770 + int ret = 0;
47771 +
47772 + /* restrict taskstats viewing to un-chrooted root users
47773 + who have the 'view' subject flag if the RBAC system is enabled
47774 + */
47775 +
47776 + rcu_read_lock();
47777 + read_lock(&tasklist_lock);
47778 + task = find_task_by_vpid(pid);
47779 + if (task) {
47780 +#ifdef CONFIG_GRKERNSEC_CHROOT
47781 + if (proc_is_chrooted(task))
47782 + ret = -EACCES;
47783 +#endif
47784 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47785 + cred = __task_cred(task);
47786 +#ifdef CONFIG_GRKERNSEC_PROC_USER
47787 + if (cred->uid != 0)
47788 + ret = -EACCES;
47789 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
47790 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
47791 + ret = -EACCES;
47792 +#endif
47793 +#endif
47794 + if (gr_status & GR_READY) {
47795 + if (!(task->acl->mode & GR_VIEW))
47796 + ret = -EACCES;
47797 + }
47798 + } else
47799 + ret = -ENOENT;
47800 +
47801 + read_unlock(&tasklist_lock);
47802 + rcu_read_unlock();
47803 +
47804 + return ret;
47805 +}
47806 +#endif
47807 +
47808 +/* AUXV entries are filled via a descendant of search_binary_handler
47809 + after we've already applied the subject for the target
47810 +*/
47811 +int gr_acl_enable_at_secure(void)
47812 +{
47813 + if (unlikely(!(gr_status & GR_READY)))
47814 + return 0;
47815 +
47816 + if (current->acl->mode & GR_ATSECURE)
47817 + return 1;
47818 +
47819 + return 0;
47820 +}
47821 +
47822 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
47823 +{
47824 + struct task_struct *task = current;
47825 + struct dentry *dentry = file->f_path.dentry;
47826 + struct vfsmount *mnt = file->f_path.mnt;
47827 + struct acl_object_label *obj, *tmp;
47828 + struct acl_subject_label *subj;
47829 + unsigned int bufsize;
47830 + int is_not_root;
47831 + char *path;
47832 + dev_t dev = __get_dev(dentry);
47833 +
47834 + if (unlikely(!(gr_status & GR_READY)))
47835 + return 1;
47836 +
47837 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
47838 + return 1;
47839 +
47840 + /* ignore Eric Biederman */
47841 + if (IS_PRIVATE(dentry->d_inode))
47842 + return 1;
47843 +
47844 + subj = task->acl;
47845 + do {
47846 + obj = lookup_acl_obj_label(ino, dev, subj);
47847 + if (obj != NULL)
47848 + return (obj->mode & GR_FIND) ? 1 : 0;
47849 + } while ((subj = subj->parent_subject));
47850 +
47851 + /* this is purely an optimization since we're looking for an object
47852 + for the directory we're doing a readdir on
47853 + if it's possible for any globbed object to match the entry we're
47854 + filling into the directory, then the object we find here will be
47855 + an anchor point with attached globbed objects
47856 + */
47857 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
47858 + if (obj->globbed == NULL)
47859 + return (obj->mode & GR_FIND) ? 1 : 0;
47860 +
47861 + is_not_root = ((obj->filename[0] == '/') &&
47862 + (obj->filename[1] == '\0')) ? 0 : 1;
47863 + bufsize = PAGE_SIZE - namelen - is_not_root;
47864 +
47865 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
47866 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
47867 + return 1;
47868 +
47869 + preempt_disable();
47870 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47871 + bufsize);
47872 +
47873 + bufsize = strlen(path);
47874 +
47875 + /* if base is "/", don't append an additional slash */
47876 + if (is_not_root)
47877 + *(path + bufsize) = '/';
47878 + memcpy(path + bufsize + is_not_root, name, namelen);
47879 + *(path + bufsize + namelen + is_not_root) = '\0';
47880 +
47881 + tmp = obj->globbed;
47882 + while (tmp) {
47883 + if (!glob_match(tmp->filename, path)) {
47884 + preempt_enable();
47885 + return (tmp->mode & GR_FIND) ? 1 : 0;
47886 + }
47887 + tmp = tmp->next;
47888 + }
47889 + preempt_enable();
47890 + return (obj->mode & GR_FIND) ? 1 : 0;
47891 +}
47892 +
47893 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
47894 +EXPORT_SYMBOL(gr_acl_is_enabled);
47895 +#endif
47896 +EXPORT_SYMBOL(gr_learn_resource);
47897 +EXPORT_SYMBOL(gr_set_kernel_label);
47898 +#ifdef CONFIG_SECURITY
47899 +EXPORT_SYMBOL(gr_check_user_change);
47900 +EXPORT_SYMBOL(gr_check_group_change);
47901 +#endif
47902 +
47903 diff -urNp linux-2.6.32.42/grsecurity/gracl_cap.c linux-2.6.32.42/grsecurity/gracl_cap.c
47904 --- linux-2.6.32.42/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
47905 +++ linux-2.6.32.42/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
47906 @@ -0,0 +1,138 @@
47907 +#include <linux/kernel.h>
47908 +#include <linux/module.h>
47909 +#include <linux/sched.h>
47910 +#include <linux/gracl.h>
47911 +#include <linux/grsecurity.h>
47912 +#include <linux/grinternal.h>
47913 +
47914 +static const char *captab_log[] = {
47915 + "CAP_CHOWN",
47916 + "CAP_DAC_OVERRIDE",
47917 + "CAP_DAC_READ_SEARCH",
47918 + "CAP_FOWNER",
47919 + "CAP_FSETID",
47920 + "CAP_KILL",
47921 + "CAP_SETGID",
47922 + "CAP_SETUID",
47923 + "CAP_SETPCAP",
47924 + "CAP_LINUX_IMMUTABLE",
47925 + "CAP_NET_BIND_SERVICE",
47926 + "CAP_NET_BROADCAST",
47927 + "CAP_NET_ADMIN",
47928 + "CAP_NET_RAW",
47929 + "CAP_IPC_LOCK",
47930 + "CAP_IPC_OWNER",
47931 + "CAP_SYS_MODULE",
47932 + "CAP_SYS_RAWIO",
47933 + "CAP_SYS_CHROOT",
47934 + "CAP_SYS_PTRACE",
47935 + "CAP_SYS_PACCT",
47936 + "CAP_SYS_ADMIN",
47937 + "CAP_SYS_BOOT",
47938 + "CAP_SYS_NICE",
47939 + "CAP_SYS_RESOURCE",
47940 + "CAP_SYS_TIME",
47941 + "CAP_SYS_TTY_CONFIG",
47942 + "CAP_MKNOD",
47943 + "CAP_LEASE",
47944 + "CAP_AUDIT_WRITE",
47945 + "CAP_AUDIT_CONTROL",
47946 + "CAP_SETFCAP",
47947 + "CAP_MAC_OVERRIDE",
47948 + "CAP_MAC_ADMIN"
47949 +};
47950 +
47951 +EXPORT_SYMBOL(gr_is_capable);
47952 +EXPORT_SYMBOL(gr_is_capable_nolog);
47953 +
47954 +int
47955 +gr_is_capable(const int cap)
47956 +{
47957 + struct task_struct *task = current;
47958 + const struct cred *cred = current_cred();
47959 + struct acl_subject_label *curracl;
47960 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
47961 + kernel_cap_t cap_audit = __cap_empty_set;
47962 +
47963 + if (!gr_acl_is_enabled())
47964 + return 1;
47965 +
47966 + curracl = task->acl;
47967 +
47968 + cap_drop = curracl->cap_lower;
47969 + cap_mask = curracl->cap_mask;
47970 + cap_audit = curracl->cap_invert_audit;
47971 +
47972 + while ((curracl = curracl->parent_subject)) {
47973 + /* if the cap isn't specified in the current computed mask but is specified in the
47974 + current level subject, and is lowered in the current level subject, then add
47975 + it to the set of dropped capabilities
47976 + otherwise, add the current level subject's mask to the current computed mask
47977 + */
47978 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
47979 + cap_raise(cap_mask, cap);
47980 + if (cap_raised(curracl->cap_lower, cap))
47981 + cap_raise(cap_drop, cap);
47982 + if (cap_raised(curracl->cap_invert_audit, cap))
47983 + cap_raise(cap_audit, cap);
47984 + }
47985 + }
47986 +
47987 + if (!cap_raised(cap_drop, cap)) {
47988 + if (cap_raised(cap_audit, cap))
47989 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
47990 + return 1;
47991 + }
47992 +
47993 + curracl = task->acl;
47994 +
47995 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
47996 + && cap_raised(cred->cap_effective, cap)) {
47997 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
47998 + task->role->roletype, cred->uid,
47999 + cred->gid, task->exec_file ?
48000 + gr_to_filename(task->exec_file->f_path.dentry,
48001 + task->exec_file->f_path.mnt) : curracl->filename,
48002 + curracl->filename, 0UL,
48003 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
48004 + return 1;
48005 + }
48006 +
48007 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
48008 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
48009 + return 0;
48010 +}
48011 +
48012 +int
48013 +gr_is_capable_nolog(const int cap)
48014 +{
48015 + struct acl_subject_label *curracl;
48016 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48017 +
48018 + if (!gr_acl_is_enabled())
48019 + return 1;
48020 +
48021 + curracl = current->acl;
48022 +
48023 + cap_drop = curracl->cap_lower;
48024 + cap_mask = curracl->cap_mask;
48025 +
48026 + while ((curracl = curracl->parent_subject)) {
48027 + /* if the cap isn't specified in the current computed mask but is specified in the
48028 + current level subject, and is lowered in the current level subject, then add
48029 + it to the set of dropped capabilities
48030 + otherwise, add the current level subject's mask to the current computed mask
48031 + */
48032 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48033 + cap_raise(cap_mask, cap);
48034 + if (cap_raised(curracl->cap_lower, cap))
48035 + cap_raise(cap_drop, cap);
48036 + }
48037 + }
48038 +
48039 + if (!cap_raised(cap_drop, cap))
48040 + return 1;
48041 +
48042 + return 0;
48043 +}
48044 +
48045 diff -urNp linux-2.6.32.42/grsecurity/gracl_fs.c linux-2.6.32.42/grsecurity/gracl_fs.c
48046 --- linux-2.6.32.42/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
48047 +++ linux-2.6.32.42/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
48048 @@ -0,0 +1,431 @@
48049 +#include <linux/kernel.h>
48050 +#include <linux/sched.h>
48051 +#include <linux/types.h>
48052 +#include <linux/fs.h>
48053 +#include <linux/file.h>
48054 +#include <linux/stat.h>
48055 +#include <linux/grsecurity.h>
48056 +#include <linux/grinternal.h>
48057 +#include <linux/gracl.h>
48058 +
48059 +__u32
48060 +gr_acl_handle_hidden_file(const struct dentry * dentry,
48061 + const struct vfsmount * mnt)
48062 +{
48063 + __u32 mode;
48064 +
48065 + if (unlikely(!dentry->d_inode))
48066 + return GR_FIND;
48067 +
48068 + mode =
48069 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
48070 +
48071 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
48072 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48073 + return mode;
48074 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
48075 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48076 + return 0;
48077 + } else if (unlikely(!(mode & GR_FIND)))
48078 + return 0;
48079 +
48080 + return GR_FIND;
48081 +}
48082 +
48083 +__u32
48084 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
48085 + const int fmode)
48086 +{
48087 + __u32 reqmode = GR_FIND;
48088 + __u32 mode;
48089 +
48090 + if (unlikely(!dentry->d_inode))
48091 + return reqmode;
48092 +
48093 + if (unlikely(fmode & O_APPEND))
48094 + reqmode |= GR_APPEND;
48095 + else if (unlikely(fmode & FMODE_WRITE))
48096 + reqmode |= GR_WRITE;
48097 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48098 + reqmode |= GR_READ;
48099 + if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
48100 + reqmode &= ~GR_READ;
48101 + mode =
48102 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48103 + mnt);
48104 +
48105 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48106 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48107 + reqmode & GR_READ ? " reading" : "",
48108 + reqmode & GR_WRITE ? " writing" : reqmode &
48109 + GR_APPEND ? " appending" : "");
48110 + return reqmode;
48111 + } else
48112 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48113 + {
48114 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48115 + reqmode & GR_READ ? " reading" : "",
48116 + reqmode & GR_WRITE ? " writing" : reqmode &
48117 + GR_APPEND ? " appending" : "");
48118 + return 0;
48119 + } else if (unlikely((mode & reqmode) != reqmode))
48120 + return 0;
48121 +
48122 + return reqmode;
48123 +}
48124 +
48125 +__u32
48126 +gr_acl_handle_creat(const struct dentry * dentry,
48127 + const struct dentry * p_dentry,
48128 + const struct vfsmount * p_mnt, const int fmode,
48129 + const int imode)
48130 +{
48131 + __u32 reqmode = GR_WRITE | GR_CREATE;
48132 + __u32 mode;
48133 +
48134 + if (unlikely(fmode & O_APPEND))
48135 + reqmode |= GR_APPEND;
48136 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48137 + reqmode |= GR_READ;
48138 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
48139 + reqmode |= GR_SETID;
48140 +
48141 + mode =
48142 + gr_check_create(dentry, p_dentry, p_mnt,
48143 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48144 +
48145 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48146 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48147 + reqmode & GR_READ ? " reading" : "",
48148 + reqmode & GR_WRITE ? " writing" : reqmode &
48149 + GR_APPEND ? " appending" : "");
48150 + return reqmode;
48151 + } else
48152 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48153 + {
48154 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48155 + reqmode & GR_READ ? " reading" : "",
48156 + reqmode & GR_WRITE ? " writing" : reqmode &
48157 + GR_APPEND ? " appending" : "");
48158 + return 0;
48159 + } else if (unlikely((mode & reqmode) != reqmode))
48160 + return 0;
48161 +
48162 + return reqmode;
48163 +}
48164 +
48165 +__u32
48166 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
48167 + const int fmode)
48168 +{
48169 + __u32 mode, reqmode = GR_FIND;
48170 +
48171 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
48172 + reqmode |= GR_EXEC;
48173 + if (fmode & S_IWOTH)
48174 + reqmode |= GR_WRITE;
48175 + if (fmode & S_IROTH)
48176 + reqmode |= GR_READ;
48177 +
48178 + mode =
48179 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48180 + mnt);
48181 +
48182 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48183 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48184 + reqmode & GR_READ ? " reading" : "",
48185 + reqmode & GR_WRITE ? " writing" : "",
48186 + reqmode & GR_EXEC ? " executing" : "");
48187 + return reqmode;
48188 + } else
48189 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48190 + {
48191 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48192 + reqmode & GR_READ ? " reading" : "",
48193 + reqmode & GR_WRITE ? " writing" : "",
48194 + reqmode & GR_EXEC ? " executing" : "");
48195 + return 0;
48196 + } else if (unlikely((mode & reqmode) != reqmode))
48197 + return 0;
48198 +
48199 + return reqmode;
48200 +}
48201 +
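+/* common helper for the simple hooks below: allow (and audit-log) when every
+   requested bit is granted and an audit bit matched, log and deny when a bit
+   is missing and suppression is not set, otherwise deny silently */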
48202 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
48203 +{
48204 + __u32 mode;
48205 +
48206 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
48207 +
48208 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48209 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
48210 + return mode;
48211 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48212 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
48213 + return 0;
48214 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
48215 + return 0;
48216 +
48217 + return (reqmode);
48218 +}
48219 +
48220 +__u32
48221 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
48222 +{
48223 +	return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE, GR_RMDIR_ACL_MSG);
48224 +}
48225 +
48226 +__u32
48227 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
48228 +{
48229 +	return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE, GR_UNLINK_ACL_MSG);
48230 +}
48231 +
48232 +__u32
48233 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
48234 +{
48235 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
48236 +}
48237 +
48238 +__u32
48239 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
48240 +{
48241 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
48242 +}
48243 +
48244 +__u32
48245 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
48246 + mode_t mode)
48247 +{
48248 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
48249 + return 1;
48250 +
48251 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48252 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48253 + GR_FCHMOD_ACL_MSG);
48254 + } else {
48255 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
48256 + }
48257 +}
48258 +
48259 +__u32
48260 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
48261 + mode_t mode)
48262 +{
48263 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48264 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48265 + GR_CHMOD_ACL_MSG);
48266 + } else {
48267 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
48268 + }
48269 +}
48270 +
48271 +__u32
48272 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
48273 +{
48274 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
48275 +}
48276 +
48277 +__u32
48278 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
48279 +{
48280 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
48281 +}
48282 +
48283 +__u32
48284 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
48285 +{
48286 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
48287 +}
48288 +
48289 +__u32
48290 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
48291 +{
48292 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
48293 + GR_UNIXCONNECT_ACL_MSG);
48294 +}
48295 +
48296 +/* hardlinks require at a minimum create permission;
48297 +   any additional privilege required is based on the
48298 +   privileges of the file being linked to
48299 +*/
48300 +__u32
48301 +gr_acl_handle_link(const struct dentry * new_dentry,
48302 + const struct dentry * parent_dentry,
48303 + const struct vfsmount * parent_mnt,
48304 + const struct dentry * old_dentry,
48305 + const struct vfsmount * old_mnt, const char *to)
48306 +{
48307 + __u32 mode;
48308 + __u32 needmode = GR_CREATE | GR_LINK;
48309 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
48310 +
48311 + mode =
48312 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
48313 + old_mnt);
48314 +
48315 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
48316 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48317 + return mode;
48318 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48319 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
48320 + return 0;
48321 + } else if (unlikely((mode & needmode) != needmode))
48322 + return 0;
48323 +
48324 + return 1;
48325 +}
48326 +
48327 +__u32
48328 +gr_acl_handle_symlink(const struct dentry * new_dentry,
48329 + const struct dentry * parent_dentry,
48330 + const struct vfsmount * parent_mnt, const char *from)
48331 +{
48332 + __u32 needmode = GR_WRITE | GR_CREATE;
48333 + __u32 mode;
48334 +
48335 + mode =
48336 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
48337 + GR_CREATE | GR_AUDIT_CREATE |
48338 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
48339 +
48340 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
48341 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48342 + return mode;
48343 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
48344 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
48345 + return 0;
48346 + } else if (unlikely((mode & needmode) != needmode))
48347 + return 0;
48348 +
48349 + return (GR_WRITE | GR_CREATE);
48350 +}
48351 +
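+/* same three-way allow/log-deny/silent-deny logic as generic_fs_handler(),
+   but checked against creating new_dentry under parent_dentry/parent_mnt */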
48352 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
48353 +{
48354 + __u32 mode;
48355 +
48356 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48357 +
48358 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48359 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
48360 + return mode;
48361 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48362 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
48363 + return 0;
48364 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
48365 + return 0;
48366 +
48367 + return (reqmode);
48368 +}
48369 +
48370 +__u32
48371 +gr_acl_handle_mknod(const struct dentry * new_dentry,
48372 + const struct dentry * parent_dentry,
48373 + const struct vfsmount * parent_mnt,
48374 + const int mode)
48375 +{
48376 + __u32 reqmode = GR_WRITE | GR_CREATE;
48377 + if (unlikely(mode & (S_ISUID | S_ISGID)))
48378 + reqmode |= GR_SETID;
48379 +
48380 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48381 + reqmode, GR_MKNOD_ACL_MSG);
48382 +}
48383 +
48384 +__u32
48385 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
48386 + const struct dentry *parent_dentry,
48387 + const struct vfsmount *parent_mnt)
48388 +{
48389 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
48390 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
48391 +}
48392 +
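+/* a rename is permitted only if both the old and the new path were granted
+   read and write by the checks in gr_acl_handle_rename() below */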
48393 +#define RENAME_CHECK_SUCCESS(old, new) \
48394 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
48395 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
48396 +
48397 +int
48398 +gr_acl_handle_rename(struct dentry *new_dentry,
48399 + struct dentry *parent_dentry,
48400 + const struct vfsmount *parent_mnt,
48401 + struct dentry *old_dentry,
48402 + struct inode *old_parent_inode,
48403 + struct vfsmount *old_mnt, const char *newname)
48404 +{
48405 + __u32 comp1, comp2;
48406 + int error = 0;
48407 +
48408 + if (unlikely(!gr_acl_is_enabled()))
48409 + return 0;
48410 +
48411 + if (!new_dentry->d_inode) {
48412 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
48413 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
48414 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
48415 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
48416 + GR_DELETE | GR_AUDIT_DELETE |
48417 + GR_AUDIT_READ | GR_AUDIT_WRITE |
48418 + GR_SUPPRESS, old_mnt);
48419 + } else {
48420 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
48421 + GR_CREATE | GR_DELETE |
48422 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
48423 + GR_AUDIT_READ | GR_AUDIT_WRITE |
48424 + GR_SUPPRESS, parent_mnt);
48425 + comp2 =
48426 + gr_search_file(old_dentry,
48427 + GR_READ | GR_WRITE | GR_AUDIT_READ |
48428 + GR_DELETE | GR_AUDIT_DELETE |
48429 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
48430 + }
48431 +
48432 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
48433 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
48434 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48435 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
48436 + && !(comp2 & GR_SUPPRESS)) {
48437 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
48438 + error = -EACCES;
48439 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
48440 + error = -EACCES;
48441 +
48442 + return error;
48443 +}
48444 +
48445 +void
48446 +gr_acl_handle_exit(void)
48447 +{
48448 + u16 id;
48449 + char *rolename;
48450 + struct file *exec_file;
48451 +
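+	/* a task exiting while it holds a non-persistent special role leaves that
+	   role: reapply ACLs via gr_set_acls(1) and log the role name and id */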
48452 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
48453 + !(current->role->roletype & GR_ROLE_PERSIST))) {
48454 + id = current->acl_role_id;
48455 + rolename = current->role->rolename;
48456 + gr_set_acls(1);
48457 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
48458 + }
48459 +
48460 + write_lock(&grsec_exec_file_lock);
48461 + exec_file = current->exec_file;
48462 + current->exec_file = NULL;
48463 + write_unlock(&grsec_exec_file_lock);
48464 +
48465 + if (exec_file)
48466 + fput(exec_file);
48467 +}
48468 +
48469 +int
48470 +gr_acl_handle_procpidmem(const struct task_struct *task)
48471 +{
48472 + if (unlikely(!gr_acl_is_enabled()))
48473 + return 0;
48474 +
48475 + if (task != current && task->acl->mode & GR_PROTPROCFD)
48476 + return -EACCES;
48477 +
48478 + return 0;
48479 +}
48480 diff -urNp linux-2.6.32.42/grsecurity/gracl_ip.c linux-2.6.32.42/grsecurity/gracl_ip.c
48481 --- linux-2.6.32.42/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
48482 +++ linux-2.6.32.42/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
48483 @@ -0,0 +1,382 @@
48484 +#include <linux/kernel.h>
48485 +#include <asm/uaccess.h>
48486 +#include <asm/errno.h>
48487 +#include <net/sock.h>
48488 +#include <linux/file.h>
48489 +#include <linux/fs.h>
48490 +#include <linux/net.h>
48491 +#include <linux/in.h>
48492 +#include <linux/skbuff.h>
48493 +#include <linux/ip.h>
48494 +#include <linux/udp.h>
48495 +#include <linux/smp_lock.h>
48496 +#include <linux/types.h>
48497 +#include <linux/sched.h>
48498 +#include <linux/netdevice.h>
48499 +#include <linux/inetdevice.h>
48500 +#include <linux/gracl.h>
48501 +#include <linux/grsecurity.h>
48502 +#include <linux/grinternal.h>
48503 +
48504 +#define GR_BIND 0x01
48505 +#define GR_CONNECT 0x02
48506 +#define GR_INVERT 0x04
48507 +#define GR_BINDOVERRIDE 0x08
48508 +#define GR_CONNECTOVERRIDE 0x10
48509 +#define GR_SOCK_FAMILY 0x20
48510 +
48511 +static const char * gr_protocols[IPPROTO_MAX] = {
48512 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
48513 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
48514 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
48515 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
48516 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
48517 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
48518 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
48519 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
48520 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
48521 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
48522 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
48523 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
48524 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
48525 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
48526 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
48527 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
48528 +	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
48529 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
48530 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
48531 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
48532 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
48533 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
48534 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
48535 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
48536 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
48537 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
48538 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
48539 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
48540 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
48541 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
48542 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
48543 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
48544 + };
48545 +
48546 +static const char * gr_socktypes[SOCK_MAX] = {
48547 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
48548 + "unknown:7", "unknown:8", "unknown:9", "packet"
48549 + };
48550 +
48551 +static const char * gr_sockfamilies[AF_MAX+1] = {
48552 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
48553 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
48554 +	"econet", "atmsvc", "rds", "sna", "irda", "pppox", "wanpipe", "llc", "fam_27", "fam_28",
48555 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
48556 + };
48557 +
48558 +const char *
48559 +gr_proto_to_name(unsigned char proto)
48560 +{
48561 + return gr_protocols[proto];
48562 +}
48563 +
48564 +const char *
48565 +gr_socktype_to_name(unsigned char type)
48566 +{
48567 + return gr_socktypes[type];
48568 +}
48569 +
48570 +const char *
48571 +gr_sockfamily_to_name(unsigned char family)
48572 +{
48573 + return gr_sockfamilies[family];
48574 +}
48575 +
48576 +int
48577 +gr_search_socket(const int domain, const int type, const int protocol)
48578 +{
48579 + struct acl_subject_label *curr;
48580 + const struct cred *cred = current_cred();
48581 +
48582 + if (unlikely(!gr_acl_is_enabled()))
48583 + goto exit;
48584 +
48585 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
48586 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
48587 + goto exit; // let the kernel handle it
48588 +
48589 + curr = current->acl;
48590 +
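+	/* sock_families is a per-subject bitmap indexed by address family:
+	   word domain / 32, bit domain % 32 */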
48591 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
48592 + /* the family is allowed, if this is PF_INET allow it only if
48593 + the extra sock type/protocol checks pass */
48594 + if (domain == PF_INET)
48595 + goto inet_check;
48596 + goto exit;
48597 + } else {
48598 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48599 + __u32 fakeip = 0;
48600 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48601 + current->role->roletype, cred->uid,
48602 + cred->gid, current->exec_file ?
48603 + gr_to_filename(current->exec_file->f_path.dentry,
48604 + current->exec_file->f_path.mnt) :
48605 + curr->filename, curr->filename,
48606 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
48607 + &current->signal->saved_ip);
48608 + goto exit;
48609 + }
48610 + goto exit_fail;
48611 + }
48612 +
48613 +inet_check:
48614 + /* the rest of this checking is for IPv4 only */
48615 + if (!curr->ips)
48616 + goto exit;
48617 +
48618 + if ((curr->ip_type & (1 << type)) &&
48619 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
48620 + goto exit;
48621 +
48622 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48623 +	/* we don't place acls on raw sockets, and sometimes
48624 + dgram/ip sockets are opened for ioctl and not
48625 + bind/connect, so we'll fake a bind learn log */
48626 + if (type == SOCK_RAW || type == SOCK_PACKET) {
48627 + __u32 fakeip = 0;
48628 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48629 + current->role->roletype, cred->uid,
48630 + cred->gid, current->exec_file ?
48631 + gr_to_filename(current->exec_file->f_path.dentry,
48632 + current->exec_file->f_path.mnt) :
48633 + curr->filename, curr->filename,
48634 + &fakeip, 0, type,
48635 + protocol, GR_CONNECT, &current->signal->saved_ip);
48636 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
48637 + __u32 fakeip = 0;
48638 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48639 + current->role->roletype, cred->uid,
48640 + cred->gid, current->exec_file ?
48641 + gr_to_filename(current->exec_file->f_path.dentry,
48642 + current->exec_file->f_path.mnt) :
48643 + curr->filename, curr->filename,
48644 + &fakeip, 0, type,
48645 + protocol, GR_BIND, &current->signal->saved_ip);
48646 + }
48647 + /* we'll log when they use connect or bind */
48648 + goto exit;
48649 + }
48650 +
48651 +exit_fail:
48652 + if (domain == PF_INET)
48653 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
48654 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
48655 + else
48656 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
48657 + gr_socktype_to_name(type), protocol);
48658 +
48659 + return 0;
48660 +exit:
48661 + return 1;
48662 +}
48663 +
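+/* match a single acl_ip_label against the requested operation: the mode,
+   the port range, the netmask-masked address, the protocol bitmap and the
+   socket type must all match; GR_INVERT turns the match into a denial */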
48664 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
48665 +{
48666 + if ((ip->mode & mode) &&
48667 + (ip_port >= ip->low) &&
48668 + (ip_port <= ip->high) &&
48669 + ((ntohl(ip_addr) & our_netmask) ==
48670 + (ntohl(our_addr) & our_netmask))
48671 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
48672 + && (ip->type & (1 << type))) {
48673 + if (ip->mode & GR_INVERT)
48674 + return 2; // specifically denied
48675 + else
48676 + return 1; // allowed
48677 + }
48678 +
48679 + return 0; // not specifically allowed, may continue parsing
48680 +}
48681 +
48682 +static int
48683 +gr_search_connectbind(const int full_mode, struct sock *sk,
48684 + struct sockaddr_in *addr, const int type)
48685 +{
48686 + char iface[IFNAMSIZ] = {0};
48687 + struct acl_subject_label *curr;
48688 + struct acl_ip_label *ip;
48689 + struct inet_sock *isk;
48690 + struct net_device *dev;
48691 + struct in_device *idev;
48692 + unsigned long i;
48693 + int ret;
48694 + int mode = full_mode & (GR_BIND | GR_CONNECT);
48695 + __u32 ip_addr = 0;
48696 + __u32 our_addr;
48697 + __u32 our_netmask;
48698 + char *p;
48699 + __u16 ip_port = 0;
48700 + const struct cred *cred = current_cred();
48701 +
48702 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
48703 + return 0;
48704 +
48705 + curr = current->acl;
48706 + isk = inet_sk(sk);
48707 +
48708 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
48709 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
48710 + addr->sin_addr.s_addr = curr->inaddr_any_override;
48711 + if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
48712 + struct sockaddr_in saddr;
48713 + int err;
48714 +
48715 + saddr.sin_family = AF_INET;
48716 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
48717 + saddr.sin_port = isk->sport;
48718 +
48719 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
48720 + if (err)
48721 + return err;
48722 +
48723 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
48724 + if (err)
48725 + return err;
48726 + }
48727 +
48728 + if (!curr->ips)
48729 + return 0;
48730 +
48731 + ip_addr = addr->sin_addr.s_addr;
48732 + ip_port = ntohs(addr->sin_port);
48733 +
48734 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
48735 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
48736 + current->role->roletype, cred->uid,
48737 + cred->gid, current->exec_file ?
48738 + gr_to_filename(current->exec_file->f_path.dentry,
48739 + current->exec_file->f_path.mnt) :
48740 + curr->filename, curr->filename,
48741 + &ip_addr, ip_port, type,
48742 + sk->sk_protocol, mode, &current->signal->saved_ip);
48743 + return 0;
48744 + }
48745 +
48746 + for (i = 0; i < curr->ip_num; i++) {
48747 + ip = *(curr->ips + i);
48748 + if (ip->iface != NULL) {
48749 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
48750 + p = strchr(iface, ':');
48751 + if (p != NULL)
48752 + *p = '\0';
48753 + dev = dev_get_by_name(sock_net(sk), iface);
48754 + if (dev == NULL)
48755 + continue;
48756 + idev = in_dev_get(dev);
48757 + if (idev == NULL) {
48758 + dev_put(dev);
48759 + continue;
48760 + }
48761 + rcu_read_lock();
48762 + for_ifa(idev) {
48763 + if (!strcmp(ip->iface, ifa->ifa_label)) {
48764 + our_addr = ifa->ifa_address;
48765 + our_netmask = 0xffffffff;
48766 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
48767 + if (ret == 1) {
48768 + rcu_read_unlock();
48769 + in_dev_put(idev);
48770 + dev_put(dev);
48771 + return 0;
48772 + } else if (ret == 2) {
48773 + rcu_read_unlock();
48774 + in_dev_put(idev);
48775 + dev_put(dev);
48776 + goto denied;
48777 + }
48778 + }
48779 + } endfor_ifa(idev);
48780 + rcu_read_unlock();
48781 + in_dev_put(idev);
48782 + dev_put(dev);
48783 + } else {
48784 + our_addr = ip->addr;
48785 + our_netmask = ip->netmask;
48786 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
48787 + if (ret == 1)
48788 + return 0;
48789 + else if (ret == 2)
48790 + goto denied;
48791 + }
48792 + }
48793 +
48794 +denied:
48795 + if (mode == GR_BIND)
48796 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
48797 + else if (mode == GR_CONNECT)
48798 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
48799 +
48800 + return -EACCES;
48801 +}
48802 +
48803 +int
48804 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
48805 +{
48806 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
48807 +}
48808 +
48809 +int
48810 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
48811 +{
48812 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
48813 +}
48814 +
48815 +int gr_search_listen(struct socket *sock)
48816 +{
48817 + struct sock *sk = sock->sk;
48818 + struct sockaddr_in addr;
48819 +
48820 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
48821 + addr.sin_port = inet_sk(sk)->sport;
48822 +
48823 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
48824 +}
48825 +
48826 +int gr_search_accept(struct socket *sock)
48827 +{
48828 + struct sock *sk = sock->sk;
48829 + struct sockaddr_in addr;
48830 +
48831 + addr.sin_addr.s_addr = inet_sk(sk)->saddr;
48832 + addr.sin_port = inet_sk(sk)->sport;
48833 +
48834 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
48835 +}
48836 +
48837 +int
48838 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
48839 +{
48840 + if (addr)
48841 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
48842 + else {
48843 + struct sockaddr_in sin;
48844 + const struct inet_sock *inet = inet_sk(sk);
48845 +
48846 + sin.sin_addr.s_addr = inet->daddr;
48847 + sin.sin_port = inet->dport;
48848 +
48849 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
48850 + }
48851 +}
48852 +
48853 +int
48854 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
48855 +{
48856 + struct sockaddr_in sin;
48857 +
48858 + if (unlikely(skb->len < sizeof (struct udphdr)))
48859 + return 0; // skip this packet
48860 +
48861 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
48862 + sin.sin_port = udp_hdr(skb)->source;
48863 +
48864 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
48865 +}
48866 diff -urNp linux-2.6.32.42/grsecurity/gracl_learn.c linux-2.6.32.42/grsecurity/gracl_learn.c
48867 --- linux-2.6.32.42/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
48868 +++ linux-2.6.32.42/grsecurity/gracl_learn.c 2011-04-17 15:56:46.000000000 -0400
48869 @@ -0,0 +1,211 @@
48870 +#include <linux/kernel.h>
48871 +#include <linux/mm.h>
48872 +#include <linux/sched.h>
48873 +#include <linux/poll.h>
48874 +#include <linux/smp_lock.h>
48875 +#include <linux/string.h>
48876 +#include <linux/file.h>
48877 +#include <linux/types.h>
48878 +#include <linux/vmalloc.h>
48879 +#include <linux/grinternal.h>
48880 +
48881 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
48882 + size_t count, loff_t *ppos);
48883 +extern int gr_acl_is_enabled(void);
48884 +
48885 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
48886 +static int gr_learn_attached;
48887 +
48888 +/* use a 512k buffer */
48889 +#define LEARN_BUFFER_SIZE (512 * 1024)
48890 +
48891 +static DEFINE_SPINLOCK(gr_learn_lock);
48892 +static DEFINE_MUTEX(gr_learn_user_mutex);
48893 +
48894 +/* we need to maintain two buffers, so that the kernel context of grlearn
48895 +   uses a mutex around the userspace copying, and the other kernel contexts
48896 + use a spinlock when copying into the buffer, since they cannot sleep
48897 +*/
48898 +static char *learn_buffer;
48899 +static char *learn_buffer_user;
48900 +static int learn_buffer_len;
48901 +static int learn_buffer_user_len;
48902 +
48903 +static ssize_t
48904 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
48905 +{
48906 + DECLARE_WAITQUEUE(wait, current);
48907 + ssize_t retval = 0;
48908 +
48909 + add_wait_queue(&learn_wait, &wait);
48910 + set_current_state(TASK_INTERRUPTIBLE);
48911 + do {
48912 + mutex_lock(&gr_learn_user_mutex);
48913 + spin_lock(&gr_learn_lock);
48914 + if (learn_buffer_len)
48915 + break;
48916 + spin_unlock(&gr_learn_lock);
48917 + mutex_unlock(&gr_learn_user_mutex);
48918 + if (file->f_flags & O_NONBLOCK) {
48919 + retval = -EAGAIN;
48920 + goto out;
48921 + }
48922 + if (signal_pending(current)) {
48923 + retval = -ERESTARTSYS;
48924 + goto out;
48925 + }
48926 +
48927 + schedule();
48928 + } while (1);
48929 +
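+	/* both gr_learn_user_mutex and gr_learn_lock are held here: stage the data
+	   into learn_buffer_user under the spinlock, then drop the spinlock before
+	   the sleeping copy_to_user() below */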
48930 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
48931 + learn_buffer_user_len = learn_buffer_len;
48932 + retval = learn_buffer_len;
48933 + learn_buffer_len = 0;
48934 +
48935 + spin_unlock(&gr_learn_lock);
48936 +
48937 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
48938 + retval = -EFAULT;
48939 +
48940 + mutex_unlock(&gr_learn_user_mutex);
48941 +out:
48942 + set_current_state(TASK_RUNNING);
48943 + remove_wait_queue(&learn_wait, &wait);
48944 + return retval;
48945 +}
48946 +
48947 +static unsigned int
48948 +poll_learn(struct file * file, poll_table * wait)
48949 +{
48950 + poll_wait(file, &learn_wait, wait);
48951 +
48952 + if (learn_buffer_len)
48953 + return (POLLIN | POLLRDNORM);
48954 +
48955 + return 0;
48956 +}
48957 +
48958 +void
48959 +gr_clear_learn_entries(void)
48960 +{
48961 + char *tmp;
48962 +
48963 + mutex_lock(&gr_learn_user_mutex);
48964 + if (learn_buffer != NULL) {
48965 + spin_lock(&gr_learn_lock);
48966 + tmp = learn_buffer;
48967 + learn_buffer = NULL;
48968 + spin_unlock(&gr_learn_lock);
48969 +		vfree(tmp);	/* free the saved pointer; learn_buffer was just set to NULL */
48970 + }
48971 + if (learn_buffer_user != NULL) {
48972 + vfree(learn_buffer_user);
48973 + learn_buffer_user = NULL;
48974 + }
48975 + learn_buffer_len = 0;
48976 + mutex_unlock(&gr_learn_user_mutex);
48977 +
48978 + return;
48979 +}
48980 +
48981 +void
48982 +gr_add_learn_entry(const char *fmt, ...)
48983 +{
48984 + va_list args;
48985 + unsigned int len;
48986 +
48987 + if (!gr_learn_attached)
48988 + return;
48989 +
48990 + spin_lock(&gr_learn_lock);
48991 +
48992 + /* leave a gap at the end so we know when it's "full" but don't have to
48993 + compute the exact length of the string we're trying to append
48994 + */
48995 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
48996 + spin_unlock(&gr_learn_lock);
48997 + wake_up_interruptible(&learn_wait);
48998 + return;
48999 + }
49000 + if (learn_buffer == NULL) {
49001 + spin_unlock(&gr_learn_lock);
49002 + return;
49003 + }
49004 +
49005 + va_start(args, fmt);
49006 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
49007 + va_end(args);
49008 +
49009 + learn_buffer_len += len + 1;
49010 +
49011 + spin_unlock(&gr_learn_lock);
49012 + wake_up_interruptible(&learn_wait);
49013 +
49014 + return;
49015 +}
49016 +
49017 +static int
49018 +open_learn(struct inode *inode, struct file *file)
49019 +{
49020 + if (file->f_mode & FMODE_READ && gr_learn_attached)
49021 + return -EBUSY;
49022 + if (file->f_mode & FMODE_READ) {
49023 + int retval = 0;
49024 + mutex_lock(&gr_learn_user_mutex);
49025 + if (learn_buffer == NULL)
49026 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
49027 + if (learn_buffer_user == NULL)
49028 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
49029 + if (learn_buffer == NULL) {
49030 + retval = -ENOMEM;
49031 + goto out_error;
49032 + }
49033 + if (learn_buffer_user == NULL) {
49034 + retval = -ENOMEM;
49035 + goto out_error;
49036 + }
49037 + learn_buffer_len = 0;
49038 + learn_buffer_user_len = 0;
49039 + gr_learn_attached = 1;
49040 +out_error:
49041 + mutex_unlock(&gr_learn_user_mutex);
49042 + return retval;
49043 + }
49044 + return 0;
49045 +}
49046 +
49047 +static int
49048 +close_learn(struct inode *inode, struct file *file)
49049 +{
49050 + char *tmp;
49051 +
49052 + if (file->f_mode & FMODE_READ) {
49053 + mutex_lock(&gr_learn_user_mutex);
49054 + if (learn_buffer != NULL) {
49055 + spin_lock(&gr_learn_lock);
49056 + tmp = learn_buffer;
49057 + learn_buffer = NULL;
49058 + spin_unlock(&gr_learn_lock);
49059 + vfree(tmp);
49060 + }
49061 + if (learn_buffer_user != NULL) {
49062 + vfree(learn_buffer_user);
49063 + learn_buffer_user = NULL;
49064 + }
49065 + learn_buffer_len = 0;
49066 + learn_buffer_user_len = 0;
49067 + gr_learn_attached = 0;
49068 + mutex_unlock(&gr_learn_user_mutex);
49069 + }
49070 +
49071 + return 0;
49072 +}
49073 +
49074 +const struct file_operations grsec_fops = {
49075 + .read = read_learn,
49076 + .write = write_grsec_handler,
49077 + .open = open_learn,
49078 + .release = close_learn,
49079 + .poll = poll_learn,
49080 +};
49081 diff -urNp linux-2.6.32.42/grsecurity/gracl_res.c linux-2.6.32.42/grsecurity/gracl_res.c
49082 --- linux-2.6.32.42/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
49083 +++ linux-2.6.32.42/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
49084 @@ -0,0 +1,67 @@
49085 +#include <linux/kernel.h>
49086 +#include <linux/sched.h>
49087 +#include <linux/gracl.h>
49088 +#include <linux/grinternal.h>
49089 +
49090 +static const char *restab_log[] = {
49091 + [RLIMIT_CPU] = "RLIMIT_CPU",
49092 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
49093 + [RLIMIT_DATA] = "RLIMIT_DATA",
49094 + [RLIMIT_STACK] = "RLIMIT_STACK",
49095 + [RLIMIT_CORE] = "RLIMIT_CORE",
49096 + [RLIMIT_RSS] = "RLIMIT_RSS",
49097 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
49098 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
49099 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
49100 + [RLIMIT_AS] = "RLIMIT_AS",
49101 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
49102 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
49103 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
49104 + [RLIMIT_NICE] = "RLIMIT_NICE",
49105 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
49106 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
49107 + [GR_CRASH_RES] = "RLIMIT_CRASH"
49108 +};
49109 +
49110 +void
49111 +gr_log_resource(const struct task_struct *task,
49112 + const int res, const unsigned long wanted, const int gt)
49113 +{
49114 + const struct cred *cred;
49115 + unsigned long rlim;
49116 +
49117 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
49118 + return;
49119 +
49120 + // not yet supported resource
49121 + if (unlikely(!restab_log[res]))
49122 + return;
49123 +
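+	/* RLIMIT_CPU and RLIMIT_RTTIME are checked against the hard limit,
+	   everything else against the soft limit */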
49124 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
49125 + rlim = task->signal->rlim[res].rlim_max;
49126 + else
49127 + rlim = task->signal->rlim[res].rlim_cur;
49128 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
49129 + return;
49130 +
49131 + rcu_read_lock();
49132 + cred = __task_cred(task);
49133 +
49134 + if (res == RLIMIT_NPROC &&
49135 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
49136 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
49137 + goto out_rcu_unlock;
49138 + else if (res == RLIMIT_MEMLOCK &&
49139 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
49140 + goto out_rcu_unlock;
49141 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
49142 + goto out_rcu_unlock;
49143 + rcu_read_unlock();
49144 +
49145 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
49146 +
49147 + return;
49148 +out_rcu_unlock:
49149 + rcu_read_unlock();
49150 + return;
49151 +}
49152 diff -urNp linux-2.6.32.42/grsecurity/gracl_segv.c linux-2.6.32.42/grsecurity/gracl_segv.c
49153 --- linux-2.6.32.42/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
49154 +++ linux-2.6.32.42/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
49155 @@ -0,0 +1,284 @@
49156 +#include <linux/kernel.h>
49157 +#include <linux/mm.h>
49158 +#include <asm/uaccess.h>
49159 +#include <asm/errno.h>
49160 +#include <asm/mman.h>
49161 +#include <net/sock.h>
49162 +#include <linux/file.h>
49163 +#include <linux/fs.h>
49164 +#include <linux/net.h>
49165 +#include <linux/in.h>
49166 +#include <linux/smp_lock.h>
49167 +#include <linux/slab.h>
49168 +#include <linux/types.h>
49169 +#include <linux/sched.h>
49170 +#include <linux/timer.h>
49171 +#include <linux/gracl.h>
49172 +#include <linux/grsecurity.h>
49173 +#include <linux/grinternal.h>
49174 +
49175 +static struct crash_uid *uid_set;
49176 +static unsigned short uid_used;
49177 +static DEFINE_SPINLOCK(gr_uid_lock);
49178 +extern rwlock_t gr_inode_lock;
49179 +extern struct acl_subject_label *
49180 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
49181 + struct acl_role_label *role);
49182 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
49183 +
49184 +int
49185 +gr_init_uidset(void)
49186 +{
49187 + uid_set =
49188 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
49189 + uid_used = 0;
49190 +
49191 + return uid_set ? 1 : 0;
49192 +}
49193 +
49194 +void
49195 +gr_free_uidset(void)
49196 +{
49197 + if (uid_set)
49198 + kfree(uid_set);
49199 +
49200 + return;
49201 +}
49202 +
49203 +int
49204 +gr_find_uid(const uid_t uid)
49205 +{
49206 + struct crash_uid *tmp = uid_set;
49207 + uid_t buid;
49208 + int low = 0, high = uid_used - 1, mid;
49209 +
49210 + while (high >= low) {
49211 + mid = (low + high) >> 1;
49212 + buid = tmp[mid].uid;
49213 + if (buid == uid)
49214 + return mid;
49215 + if (buid > uid)
49216 + high = mid - 1;
49217 + if (buid < uid)
49218 + low = mid + 1;
49219 + }
49220 +
49221 + return -1;
49222 +}
49223 +
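+/* simple insertion sort that keeps uid_set ordered by uid, so that
+   gr_find_uid() above can binary-search it */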
49224 +static __inline__ void
49225 +gr_insertsort(void)
49226 +{
49227 + unsigned short i, j;
49228 + struct crash_uid index;
49229 +
49230 + for (i = 1; i < uid_used; i++) {
49231 + index = uid_set[i];
49232 + j = i;
49233 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
49234 + uid_set[j] = uid_set[j - 1];
49235 + j--;
49236 + }
49237 + uid_set[j] = index;
49238 + }
49239 +
49240 + return;
49241 +}
49242 +
49243 +static __inline__ void
49244 +gr_insert_uid(const uid_t uid, const unsigned long expires)
49245 +{
49246 + int loc;
49247 +
49248 + if (uid_used == GR_UIDTABLE_MAX)
49249 + return;
49250 +
49251 + loc = gr_find_uid(uid);
49252 +
49253 + if (loc >= 0) {
49254 + uid_set[loc].expires = expires;
49255 + return;
49256 + }
49257 +
49258 + uid_set[uid_used].uid = uid;
49259 + uid_set[uid_used].expires = expires;
49260 + uid_used++;
49261 +
49262 + gr_insertsort();
49263 +
49264 + return;
49265 +}
49266 +
49267 +void
49268 +gr_remove_uid(const unsigned short loc)
49269 +{
49270 + unsigned short i;
49271 +
49272 + for (i = loc + 1; i < uid_used; i++)
49273 + uid_set[i - 1] = uid_set[i];
49274 +
49275 + uid_used--;
49276 +
49277 + return;
49278 +}
49279 +
49280 +int
49281 +gr_check_crash_uid(const uid_t uid)
49282 +{
49283 + int loc;
49284 + int ret = 0;
49285 +
49286 + if (unlikely(!gr_acl_is_enabled()))
49287 + return 0;
49288 +
49289 + spin_lock(&gr_uid_lock);
49290 + loc = gr_find_uid(uid);
49291 +
49292 + if (loc < 0)
49293 + goto out_unlock;
49294 +
49295 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
49296 + gr_remove_uid(loc);
49297 + else
49298 + ret = 1;
49299 +
49300 +out_unlock:
49301 + spin_unlock(&gr_uid_lock);
49302 + return ret;
49303 +}
49304 +
49305 +static __inline__ int
49306 +proc_is_setxid(const struct cred *cred)
49307 +{
49308 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
49309 + cred->uid != cred->fsuid)
49310 + return 1;
49311 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
49312 + cred->gid != cred->fsgid)
49313 + return 1;
49314 +
49315 + return 0;
49316 +}
49317 +
49318 +void
49319 +gr_handle_crash(struct task_struct *task, const int sig)
49320 +{
49321 + struct acl_subject_label *curr;
49322 + struct acl_subject_label *curr2;
49323 + struct task_struct *tsk, *tsk2;
49324 + const struct cred *cred;
49325 + const struct cred *cred2;
49326 +
49327 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
49328 + return;
49329 +
49330 + if (unlikely(!gr_acl_is_enabled()))
49331 + return;
49332 +
49333 + curr = task->acl;
49334 +
49335 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
49336 + return;
49337 +
49338 + if (time_before_eq(curr->expires, get_seconds())) {
49339 + curr->expires = 0;
49340 + curr->crashes = 0;
49341 + }
49342 +
49343 + curr->crashes++;
49344 +
49345 + if (!curr->expires)
49346 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
49347 +
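+	/* once the subject has crashed rlim_cur times within its window: for a
+	   set[ug]id task the uid is banned and its other tasks are killed,
+	   otherwise every task running the same subject (same device and inode)
+	   is killed */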
49348 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49349 + time_after(curr->expires, get_seconds())) {
49350 + rcu_read_lock();
49351 + cred = __task_cred(task);
49352 + if (cred->uid && proc_is_setxid(cred)) {
49353 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49354 + spin_lock(&gr_uid_lock);
49355 + gr_insert_uid(cred->uid, curr->expires);
49356 + spin_unlock(&gr_uid_lock);
49357 + curr->expires = 0;
49358 + curr->crashes = 0;
49359 + read_lock(&tasklist_lock);
49360 + do_each_thread(tsk2, tsk) {
49361 + cred2 = __task_cred(tsk);
49362 + if (tsk != task && cred2->uid == cred->uid)
49363 + gr_fake_force_sig(SIGKILL, tsk);
49364 + } while_each_thread(tsk2, tsk);
49365 + read_unlock(&tasklist_lock);
49366 + } else {
49367 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
49368 + read_lock(&tasklist_lock);
49369 + do_each_thread(tsk2, tsk) {
49370 + if (likely(tsk != task)) {
49371 + curr2 = tsk->acl;
49372 +
49373 + if (curr2->device == curr->device &&
49374 + curr2->inode == curr->inode)
49375 + gr_fake_force_sig(SIGKILL, tsk);
49376 + }
49377 + } while_each_thread(tsk2, tsk);
49378 + read_unlock(&tasklist_lock);
49379 + }
49380 + rcu_read_unlock();
49381 + }
49382 +
49383 + return;
49384 +}
49385 +
49386 +int
49387 +gr_check_crash_exec(const struct file *filp)
49388 +{
49389 + struct acl_subject_label *curr;
49390 +
49391 + if (unlikely(!gr_acl_is_enabled()))
49392 + return 0;
49393 +
49394 + read_lock(&gr_inode_lock);
49395 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
49396 + filp->f_path.dentry->d_inode->i_sb->s_dev,
49397 + current->role);
49398 + read_unlock(&gr_inode_lock);
49399 +
49400 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
49401 + (!curr->crashes && !curr->expires))
49402 + return 0;
49403 +
49404 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
49405 + time_after(curr->expires, get_seconds()))
49406 + return 1;
49407 + else if (time_before_eq(curr->expires, get_seconds())) {
49408 + curr->crashes = 0;
49409 + curr->expires = 0;
49410 + }
49411 +
49412 + return 0;
49413 +}
49414 +
49415 +void
49416 +gr_handle_alertkill(struct task_struct *task)
49417 +{
49418 + struct acl_subject_label *curracl;
49419 + __u32 curr_ip;
49420 + struct task_struct *p, *p2;
49421 +
49422 + if (unlikely(!gr_acl_is_enabled()))
49423 + return;
49424 +
49425 + curracl = task->acl;
49426 + curr_ip = task->signal->curr_ip;
49427 +
49428 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
49429 + read_lock(&tasklist_lock);
49430 + do_each_thread(p2, p) {
49431 + if (p->signal->curr_ip == curr_ip)
49432 + gr_fake_force_sig(SIGKILL, p);
49433 + } while_each_thread(p2, p);
49434 + read_unlock(&tasklist_lock);
49435 + } else if (curracl->mode & GR_KILLPROC)
49436 + gr_fake_force_sig(SIGKILL, task);
49437 +
49438 + return;
49439 +}
49440 diff -urNp linux-2.6.32.42/grsecurity/gracl_shm.c linux-2.6.32.42/grsecurity/gracl_shm.c
49441 --- linux-2.6.32.42/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
49442 +++ linux-2.6.32.42/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
49443 @@ -0,0 +1,40 @@
49444 +#include <linux/kernel.h>
49445 +#include <linux/mm.h>
49446 +#include <linux/sched.h>
49447 +#include <linux/file.h>
49448 +#include <linux/ipc.h>
49449 +#include <linux/gracl.h>
49450 +#include <linux/grsecurity.h>
49451 +#include <linux/grinternal.h>
49452 +
49453 +int
49454 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49455 + const time_t shm_createtime, const uid_t cuid, const int shmid)
49456 +{
49457 + struct task_struct *task;
49458 +
49459 + if (!gr_acl_is_enabled())
49460 + return 1;
49461 +
49462 + rcu_read_lock();
49463 + read_lock(&tasklist_lock);
49464 +
49465 + task = find_task_by_vpid(shm_cprid);
49466 +
49467 + if (unlikely(!task))
49468 + task = find_task_by_vpid(shm_lapid);
49469 +
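+	/* deny the attach when the creating (or last attaching) task still exists
+	   and either predates the segment or is the last attacher, runs under a
+	   subject flagged GR_PROTSHM, and that subject differs from ours */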
49470 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
49471 + (task->pid == shm_lapid)) &&
49472 + (task->acl->mode & GR_PROTSHM) &&
49473 + (task->acl != current->acl))) {
49474 + read_unlock(&tasklist_lock);
49475 + rcu_read_unlock();
49476 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
49477 + return 0;
49478 + }
49479 + read_unlock(&tasklist_lock);
49480 + rcu_read_unlock();
49481 +
49482 + return 1;
49483 +}
49484 diff -urNp linux-2.6.32.42/grsecurity/grsec_chdir.c linux-2.6.32.42/grsecurity/grsec_chdir.c
49485 --- linux-2.6.32.42/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
49486 +++ linux-2.6.32.42/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
49487 @@ -0,0 +1,19 @@
49488 +#include <linux/kernel.h>
49489 +#include <linux/sched.h>
49490 +#include <linux/fs.h>
49491 +#include <linux/file.h>
49492 +#include <linux/grsecurity.h>
49493 +#include <linux/grinternal.h>
49494 +
49495 +void
49496 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
49497 +{
49498 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49499 + if ((grsec_enable_chdir && grsec_enable_group &&
49500 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
49501 + !grsec_enable_group)) {
49502 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
49503 + }
49504 +#endif
49505 + return;
49506 +}
49507 diff -urNp linux-2.6.32.42/grsecurity/grsec_chroot.c linux-2.6.32.42/grsecurity/grsec_chroot.c
49508 --- linux-2.6.32.42/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
49509 +++ linux-2.6.32.42/grsecurity/grsec_chroot.c 2011-06-20 19:44:00.000000000 -0400
49510 @@ -0,0 +1,395 @@
49511 +#include <linux/kernel.h>
49512 +#include <linux/module.h>
49513 +#include <linux/sched.h>
49514 +#include <linux/file.h>
49515 +#include <linux/fs.h>
49516 +#include <linux/mount.h>
49517 +#include <linux/types.h>
49518 +#include <linux/pid_namespace.h>
49519 +#include <linux/grsecurity.h>
49520 +#include <linux/grinternal.h>
49521 +
49522 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
49523 +{
49524 +#ifdef CONFIG_GRKERNSEC
49525 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
49526 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
49527 + task->gr_is_chrooted = 1;
49528 + else
49529 + task->gr_is_chrooted = 0;
49530 +
49531 + task->gr_chroot_dentry = path->dentry;
49532 +#endif
49533 + return;
49534 +}
49535 +
49536 +void gr_clear_chroot_entries(struct task_struct *task)
49537 +{
49538 +#ifdef CONFIG_GRKERNSEC
49539 + task->gr_is_chrooted = 0;
49540 + task->gr_chroot_dentry = NULL;
49541 +#endif
49542 + return;
49543 +}
49544 +
49545 +int
49546 +gr_handle_chroot_unix(const pid_t pid)
49547 +{
49548 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49549 + struct pid *spid = NULL;
49550 +
49551 + if (unlikely(!grsec_enable_chroot_unix))
49552 + return 1;
49553 +
49554 + if (likely(!proc_is_chrooted(current)))
49555 + return 1;
49556 +
49557 + rcu_read_lock();
49558 + read_lock(&tasklist_lock);
49559 +
49560 + spid = find_vpid(pid);
49561 + if (spid) {
49562 + struct task_struct *p;
49563 + p = pid_task(spid, PIDTYPE_PID);
49564 + if (unlikely(p && !have_same_root(current, p))) {
49565 + read_unlock(&tasklist_lock);
49566 + rcu_read_unlock();
49567 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
49568 + return 0;
49569 + }
49570 + }
49571 + read_unlock(&tasklist_lock);
49572 + rcu_read_unlock();
49573 +#endif
49574 + return 1;
49575 +}
49576 +
49577 +int
49578 +gr_handle_chroot_nice(void)
49579 +{
49580 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49581 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
49582 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
49583 + return -EPERM;
49584 + }
49585 +#endif
49586 + return 0;
49587 +}
49588 +
49589 +int
49590 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
49591 +{
49592 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49593 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
49594 + && proc_is_chrooted(current)) {
49595 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
49596 + return -EACCES;
49597 + }
49598 +#endif
49599 + return 0;
49600 +}
49601 +
49602 +int
49603 +gr_handle_chroot_rawio(const struct inode *inode)
49604 +{
49605 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49606 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
49607 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
49608 + return 1;
49609 +#endif
49610 + return 0;
49611 +}
49612 +
49613 +int
49614 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
49615 +{
49616 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49617 + struct task_struct *p;
49618 + int ret = 0;
49619 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
49620 + return ret;
49621 +
49622 + read_lock(&tasklist_lock);
49623 + do_each_pid_task(pid, type, p) {
49624 + if (!have_same_root(current, p)) {
49625 + ret = 1;
49626 + goto out;
49627 + }
49628 + } while_each_pid_task(pid, type, p);
49629 +out:
49630 + read_unlock(&tasklist_lock);
49631 + return ret;
49632 +#endif
49633 + return 0;
49634 +}
49635 +
49636 +int
49637 +gr_pid_is_chrooted(struct task_struct *p)
49638 +{
49639 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49640 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
49641 + return 0;
49642 +
49643 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
49644 + !have_same_root(current, p)) {
49645 + return 1;
49646 + }
49647 +#endif
49648 + return 0;
49649 +}
49650 +
49651 +EXPORT_SYMBOL(gr_pid_is_chrooted);
49652 +
49653 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
49654 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
49655 +{
49656 + struct dentry *dentry = (struct dentry *)u_dentry;
49657 + struct vfsmount *mnt = (struct vfsmount *)u_mnt;
49658 + struct dentry *realroot;
49659 + struct vfsmount *realrootmnt;
49660 + struct dentry *currentroot;
49661 + struct vfsmount *currentmnt;
49662 + struct task_struct *reaper = &init_task;
49663 + int ret = 1;
49664 +
49665 + read_lock(&reaper->fs->lock);
49666 + realrootmnt = mntget(reaper->fs->root.mnt);
49667 + realroot = dget(reaper->fs->root.dentry);
49668 + read_unlock(&reaper->fs->lock);
49669 +
49670 + read_lock(&current->fs->lock);
49671 + currentmnt = mntget(current->fs->root.mnt);
49672 + currentroot = dget(current->fs->root.dentry);
49673 + read_unlock(&current->fs->lock);
49674 +
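+	/* walk up from the dentry, crossing mount points, until we hit either the
+	   real root (the access lies outside the chroot) or the current task's
+	   root (it lies inside) */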
49675 + spin_lock(&dcache_lock);
49676 + for (;;) {
49677 + if (unlikely((dentry == realroot && mnt == realrootmnt)
49678 + || (dentry == currentroot && mnt == currentmnt)))
49679 + break;
49680 + if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
49681 + if (mnt->mnt_parent == mnt)
49682 + break;
49683 + dentry = mnt->mnt_mountpoint;
49684 + mnt = mnt->mnt_parent;
49685 + continue;
49686 + }
49687 + dentry = dentry->d_parent;
49688 + }
49689 + spin_unlock(&dcache_lock);
49690 +
49691 + dput(currentroot);
49692 + mntput(currentmnt);
49693 +
49694 + /* access is outside of chroot */
49695 + if (dentry == realroot && mnt == realrootmnt)
49696 + ret = 0;
49697 +
49698 + dput(realroot);
49699 + mntput(realrootmnt);
49700 + return ret;
49701 +}
49702 +#endif
49703 +
49704 +int
49705 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
49706 +{
49707 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49708 + if (!grsec_enable_chroot_fchdir)
49709 + return 1;
49710 +
49711 + if (!proc_is_chrooted(current))
49712 + return 1;
49713 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
49714 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
49715 + return 0;
49716 + }
49717 +#endif
49718 + return 1;
49719 +}
49720 +
49721 +int
49722 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
49723 + const time_t shm_createtime)
49724 +{
49725 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49726 + struct pid *pid = NULL;
49727 + time_t starttime;
49728 +
49729 + if (unlikely(!grsec_enable_chroot_shmat))
49730 + return 1;
49731 +
49732 + if (likely(!proc_is_chrooted(current)))
49733 + return 1;
49734 +
49735 + rcu_read_lock();
49736 + read_lock(&tasklist_lock);
49737 +
49738 + pid = find_vpid(shm_cprid);
49739 + if (pid) {
49740 + struct task_struct *p;
49741 + p = pid_task(pid, PIDTYPE_PID);
49742 + if (p == NULL)
49743 + goto unlock;
49744 + starttime = p->start_time.tv_sec;
49745 + if (unlikely(!have_same_root(current, p) &&
49746 + time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime))) {
49747 + read_unlock(&tasklist_lock);
49748 + rcu_read_unlock();
49749 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
49750 + return 0;
49751 + }
49752 + } else {
49753 + pid = find_vpid(shm_lapid);
49754 + if (pid) {
49755 + struct task_struct *p;
49756 + p = pid_task(pid, PIDTYPE_PID);
49757 + if (p == NULL)
49758 + goto unlock;
49759 + if (unlikely(!have_same_root(current, p))) {
49760 + read_unlock(&tasklist_lock);
49761 + rcu_read_unlock();
49762 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
49763 + return 0;
49764 + }
49765 + }
49766 + }
49767 +
49768 +unlock:
49769 + read_unlock(&tasklist_lock);
49770 + rcu_read_unlock();
49771 +#endif
49772 + return 1;
49773 +}
49774 +
49775 +void
49776 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
49777 +{
49778 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49779 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
49780 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
49781 +#endif
49782 + return;
49783 +}
49784 +
49785 +int
49786 +gr_handle_chroot_mknod(const struct dentry *dentry,
49787 + const struct vfsmount *mnt, const int mode)
49788 +{
49789 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49790 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
49791 + proc_is_chrooted(current)) {
49792 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
49793 + return -EPERM;
49794 + }
49795 +#endif
49796 + return 0;
49797 +}
49798 +
49799 +int
49800 +gr_handle_chroot_mount(const struct dentry *dentry,
49801 + const struct vfsmount *mnt, const char *dev_name)
49802 +{
49803 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49804 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
49805 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
49806 + return -EPERM;
49807 + }
49808 +#endif
49809 + return 0;
49810 +}
49811 +
49812 +int
49813 +gr_handle_chroot_pivot(void)
49814 +{
49815 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49816 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
49817 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
49818 + return -EPERM;
49819 + }
49820 +#endif
49821 + return 0;
49822 +}
49823 +
49824 +int
49825 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
49826 +{
49827 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49828 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
49829 + !gr_is_outside_chroot(dentry, mnt)) {
49830 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
49831 + return -EPERM;
49832 + }
49833 +#endif
49834 + return 0;
49835 +}
49836 +
49837 +int
49838 +gr_handle_chroot_caps(struct path *path)
49839 +{
49840 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49841 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
49842 + (init_task.fs->root.dentry != path->dentry) &&
49843 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
49844 +
49845 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
49846 + const struct cred *old = current_cred();
49847 + struct cred *new = prepare_creds();
49848 + if (new == NULL)
49849 + return 1;
49850 +
49851 + new->cap_permitted = cap_drop(old->cap_permitted,
49852 + chroot_caps);
49853 + new->cap_inheritable = cap_drop(old->cap_inheritable,
49854 + chroot_caps);
49855 + new->cap_effective = cap_drop(old->cap_effective,
49856 + chroot_caps);
49857 +
49858 + commit_creds(new);
49859 +
49860 + return 0;
49861 + }
49862 +#endif
49863 + return 0;
49864 +}
49865 +
49866 +int
49867 +gr_handle_chroot_sysctl(const int op)
49868 +{
49869 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49870 + if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
49871 + && (op & MAY_WRITE))
49872 + return -EACCES;
49873 +#endif
49874 + return 0;
49875 +}
49876 +
49877 +void
49878 +gr_handle_chroot_chdir(struct path *path)
49879 +{
49880 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49881 + if (grsec_enable_chroot_chdir)
49882 + set_fs_pwd(current->fs, path);
49883 +#endif
49884 + return;
49885 +}
49886 +
49887 +int
49888 +gr_handle_chroot_chmod(const struct dentry *dentry,
49889 + const struct vfsmount *mnt, const int mode)
49890 +{
49891 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49892 + /* allow chmod +s on directories, but not on files */
49893 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
49894 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
49895 + proc_is_chrooted(current)) {
49896 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
49897 + return -EPERM;
49898 + }
49899 +#endif
49900 + return 0;
49901 +}
49902 +
49903 +#ifdef CONFIG_SECURITY
49904 +EXPORT_SYMBOL(gr_handle_chroot_caps);
49905 +#endif
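The chroot helpers above all follow one shape: each acts only when its GRKERNSEC_CHROOT_* option is compiled in, the matching grsec_enable_* toggle is set, and the caller is actually chrooted; a non-zero/-EPERM result is what the call site propagates. The shmat check is the least obvious one — roughly, a chrooted task may not attach a segment whose creator (or last attacher) lives under a different root and was already running when the segment was created. A standalone userspace sketch of that predicate (illustrative only, not part of the patch; the real code walks the pid table under RCU and the tasklist lock):

#include <stdio.h>
#include <time.h>

struct fake_task { time_t start_time; int same_root; };

/* Deny shmat inside a chroot when the segment's creator has a different root
 * and started no later than the segment was created (time_before_eq above). */
static int shmat_allowed(int task_is_chrooted, const struct fake_task *creator,
                         time_t shm_createtime)
{
        if (!task_is_chrooted)
                return 1;
        if (creator && !creator->same_root &&
            creator->start_time <= shm_createtime)
                return 0;               /* denied and logged in the real code */
        return 1;
}

int main(void)
{
        struct fake_task outside = { .start_time = 1000, .same_root = 0 };
        printf("%d\n", shmat_allowed(1, &outside, 2000));   /* 0: denied */
        printf("%d\n", shmat_allowed(0, &outside, 2000));   /* 1: not chrooted */
        return 0;
}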
49906 diff -urNp linux-2.6.32.42/grsecurity/grsec_disabled.c linux-2.6.32.42/grsecurity/grsec_disabled.c
49907 --- linux-2.6.32.42/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
49908 +++ linux-2.6.32.42/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
49909 @@ -0,0 +1,447 @@
49910 +#include <linux/kernel.h>
49911 +#include <linux/module.h>
49912 +#include <linux/sched.h>
49913 +#include <linux/file.h>
49914 +#include <linux/fs.h>
49915 +#include <linux/kdev_t.h>
49916 +#include <linux/net.h>
49917 +#include <linux/in.h>
49918 +#include <linux/ip.h>
49919 +#include <linux/skbuff.h>
49920 +#include <linux/sysctl.h>
49921 +
49922 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
49923 +void
49924 +pax_set_initial_flags(struct linux_binprm *bprm)
49925 +{
49926 + return;
49927 +}
49928 +#endif
49929 +
49930 +#ifdef CONFIG_SYSCTL
49931 +__u32
49932 +gr_handle_sysctl(const struct ctl_table * table, const int op)
49933 +{
49934 + return 0;
49935 +}
49936 +#endif
49937 +
49938 +#ifdef CONFIG_TASKSTATS
49939 +int gr_is_taskstats_denied(int pid)
49940 +{
49941 + return 0;
49942 +}
49943 +#endif
49944 +
49945 +int
49946 +gr_acl_is_enabled(void)
49947 +{
49948 + return 0;
49949 +}
49950 +
49951 +int
49952 +gr_handle_rawio(const struct inode *inode)
49953 +{
49954 + return 0;
49955 +}
49956 +
49957 +void
49958 +gr_acl_handle_psacct(struct task_struct *task, const long code)
49959 +{
49960 + return;
49961 +}
49962 +
49963 +int
49964 +gr_handle_ptrace(struct task_struct *task, const long request)
49965 +{
49966 + return 0;
49967 +}
49968 +
49969 +int
49970 +gr_handle_proc_ptrace(struct task_struct *task)
49971 +{
49972 + return 0;
49973 +}
49974 +
49975 +void
49976 +gr_learn_resource(const struct task_struct *task,
49977 + const int res, const unsigned long wanted, const int gt)
49978 +{
49979 + return;
49980 +}
49981 +
49982 +int
49983 +gr_set_acls(const int type)
49984 +{
49985 + return 0;
49986 +}
49987 +
49988 +int
49989 +gr_check_hidden_task(const struct task_struct *tsk)
49990 +{
49991 + return 0;
49992 +}
49993 +
49994 +int
49995 +gr_check_protected_task(const struct task_struct *task)
49996 +{
49997 + return 0;
49998 +}
49999 +
50000 +int
50001 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50002 +{
50003 + return 0;
50004 +}
50005 +
50006 +void
50007 +gr_copy_label(struct task_struct *tsk)
50008 +{
50009 + return;
50010 +}
50011 +
50012 +void
50013 +gr_set_pax_flags(struct task_struct *task)
50014 +{
50015 + return;
50016 +}
50017 +
50018 +int
50019 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50020 + const int unsafe_share)
50021 +{
50022 + return 0;
50023 +}
50024 +
50025 +void
50026 +gr_handle_delete(const ino_t ino, const dev_t dev)
50027 +{
50028 + return;
50029 +}
50030 +
50031 +void
50032 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50033 +{
50034 + return;
50035 +}
50036 +
50037 +void
50038 +gr_handle_crash(struct task_struct *task, const int sig)
50039 +{
50040 + return;
50041 +}
50042 +
50043 +int
50044 +gr_check_crash_exec(const struct file *filp)
50045 +{
50046 + return 0;
50047 +}
50048 +
50049 +int
50050 +gr_check_crash_uid(const uid_t uid)
50051 +{
50052 + return 0;
50053 +}
50054 +
50055 +void
50056 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50057 + struct dentry *old_dentry,
50058 + struct dentry *new_dentry,
50059 + struct vfsmount *mnt, const __u8 replace)
50060 +{
50061 + return;
50062 +}
50063 +
50064 +int
50065 +gr_search_socket(const int family, const int type, const int protocol)
50066 +{
50067 + return 1;
50068 +}
50069 +
50070 +int
50071 +gr_search_connectbind(const int mode, const struct socket *sock,
50072 + const struct sockaddr_in *addr)
50073 +{
50074 + return 0;
50075 +}
50076 +
50077 +int
50078 +gr_is_capable(const int cap)
50079 +{
50080 + return 1;
50081 +}
50082 +
50083 +int
50084 +gr_is_capable_nolog(const int cap)
50085 +{
50086 + return 1;
50087 +}
50088 +
50089 +void
50090 +gr_handle_alertkill(struct task_struct *task)
50091 +{
50092 + return;
50093 +}
50094 +
50095 +__u32
50096 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
50097 +{
50098 + return 1;
50099 +}
50100 +
50101 +__u32
50102 +gr_acl_handle_hidden_file(const struct dentry * dentry,
50103 + const struct vfsmount * mnt)
50104 +{
50105 + return 1;
50106 +}
50107 +
50108 +__u32
50109 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50110 + const int fmode)
50111 +{
50112 + return 1;
50113 +}
50114 +
50115 +__u32
50116 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
50117 +{
50118 + return 1;
50119 +}
50120 +
50121 +__u32
50122 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
50123 +{
50124 + return 1;
50125 +}
50126 +
50127 +int
50128 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
50129 + unsigned int *vm_flags)
50130 +{
50131 + return 1;
50132 +}
50133 +
50134 +__u32
50135 +gr_acl_handle_truncate(const struct dentry * dentry,
50136 + const struct vfsmount * mnt)
50137 +{
50138 + return 1;
50139 +}
50140 +
50141 +__u32
50142 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
50143 +{
50144 + return 1;
50145 +}
50146 +
50147 +__u32
50148 +gr_acl_handle_access(const struct dentry * dentry,
50149 + const struct vfsmount * mnt, const int fmode)
50150 +{
50151 + return 1;
50152 +}
50153 +
50154 +__u32
50155 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
50156 + mode_t mode)
50157 +{
50158 + return 1;
50159 +}
50160 +
50161 +__u32
50162 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
50163 + mode_t mode)
50164 +{
50165 + return 1;
50166 +}
50167 +
50168 +__u32
50169 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
50170 +{
50171 + return 1;
50172 +}
50173 +
50174 +__u32
50175 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
50176 +{
50177 + return 1;
50178 +}
50179 +
50180 +void
50181 +grsecurity_init(void)
50182 +{
50183 + return;
50184 +}
50185 +
50186 +__u32
50187 +gr_acl_handle_mknod(const struct dentry * new_dentry,
50188 + const struct dentry * parent_dentry,
50189 + const struct vfsmount * parent_mnt,
50190 + const int mode)
50191 +{
50192 + return 1;
50193 +}
50194 +
50195 +__u32
50196 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
50197 + const struct dentry * parent_dentry,
50198 + const struct vfsmount * parent_mnt)
50199 +{
50200 + return 1;
50201 +}
50202 +
50203 +__u32
50204 +gr_acl_handle_symlink(const struct dentry * new_dentry,
50205 + const struct dentry * parent_dentry,
50206 + const struct vfsmount * parent_mnt, const char *from)
50207 +{
50208 + return 1;
50209 +}
50210 +
50211 +__u32
50212 +gr_acl_handle_link(const struct dentry * new_dentry,
50213 + const struct dentry * parent_dentry,
50214 + const struct vfsmount * parent_mnt,
50215 + const struct dentry * old_dentry,
50216 + const struct vfsmount * old_mnt, const char *to)
50217 +{
50218 + return 1;
50219 +}
50220 +
50221 +int
50222 +gr_acl_handle_rename(const struct dentry *new_dentry,
50223 + const struct dentry *parent_dentry,
50224 + const struct vfsmount *parent_mnt,
50225 + const struct dentry *old_dentry,
50226 + const struct inode *old_parent_inode,
50227 + const struct vfsmount *old_mnt, const char *newname)
50228 +{
50229 + return 0;
50230 +}
50231 +
50232 +int
50233 +gr_acl_handle_filldir(const struct file *file, const char *name,
50234 + const int namelen, const ino_t ino)
50235 +{
50236 + return 1;
50237 +}
50238 +
50239 +int
50240 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50241 + const time_t shm_createtime, const uid_t cuid, const int shmid)
50242 +{
50243 + return 1;
50244 +}
50245 +
50246 +int
50247 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
50248 +{
50249 + return 0;
50250 +}
50251 +
50252 +int
50253 +gr_search_accept(const struct socket *sock)
50254 +{
50255 + return 0;
50256 +}
50257 +
50258 +int
50259 +gr_search_listen(const struct socket *sock)
50260 +{
50261 + return 0;
50262 +}
50263 +
50264 +int
50265 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
50266 +{
50267 + return 0;
50268 +}
50269 +
50270 +__u32
50271 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
50272 +{
50273 + return 1;
50274 +}
50275 +
50276 +__u32
50277 +gr_acl_handle_creat(const struct dentry * dentry,
50278 + const struct dentry * p_dentry,
50279 + const struct vfsmount * p_mnt, const int fmode,
50280 + const int imode)
50281 +{
50282 + return 1;
50283 +}
50284 +
50285 +void
50286 +gr_acl_handle_exit(void)
50287 +{
50288 + return;
50289 +}
50290 +
50291 +int
50292 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
50293 +{
50294 + return 1;
50295 +}
50296 +
50297 +void
50298 +gr_set_role_label(const uid_t uid, const gid_t gid)
50299 +{
50300 + return;
50301 +}
50302 +
50303 +int
50304 +gr_acl_handle_procpidmem(const struct task_struct *task)
50305 +{
50306 + return 0;
50307 +}
50308 +
50309 +int
50310 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
50311 +{
50312 + return 0;
50313 +}
50314 +
50315 +int
50316 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
50317 +{
50318 + return 0;
50319 +}
50320 +
50321 +void
50322 +gr_set_kernel_label(struct task_struct *task)
50323 +{
50324 + return;
50325 +}
50326 +
50327 +int
50328 +gr_check_user_change(int real, int effective, int fs)
50329 +{
50330 + return 0;
50331 +}
50332 +
50333 +int
50334 +gr_check_group_change(int real, int effective, int fs)
50335 +{
50336 + return 0;
50337 +}
50338 +
50339 +int gr_acl_enable_at_secure(void)
50340 +{
50341 + return 0;
50342 +}
50343 +
50344 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
50345 +{
50346 + return dentry->d_inode->i_sb->s_dev;
50347 +}
50348 +
50349 +EXPORT_SYMBOL(gr_is_capable);
50350 +EXPORT_SYMBOL(gr_is_capable_nolog);
50351 +EXPORT_SYMBOL(gr_learn_resource);
50352 +EXPORT_SYMBOL(gr_set_kernel_label);
50353 +#ifdef CONFIG_SECURITY
50354 +EXPORT_SYMBOL(gr_check_user_change);
50355 +EXPORT_SYMBOL(gr_check_group_change);
50356 +#endif
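grsec_disabled.c supplies no-op implementations of the grsecurity/RBAC hooks for builds without those features, each returning whatever value its callers treat as "allow / nothing to do", so the hook call sites elsewhere in the patch never need #ifdefs. A hedged userspace illustration of that build-time substitution pattern (all names invented):

#include <stdio.h>
#include <string.h>

/* The same call site links against either a real policy check or a permissive
 * stub chosen at build time; the caller is identical in both cases. */
#ifdef POLICY_ENABLED
static int acl_handle_open(const char *path)
{
        return strncmp(path, "/etc/", 5) != 0;   /* toy policy: deny /etc */
}
#else
static int acl_handle_open(const char *path)
{
        (void)path;
        return 1;                                /* stub: always allow */
}
#endif

int main(void)
{
        const char *path = "/etc/shadow";
        if (!acl_handle_open(path))
                printf("open of %s denied by policy\n", path);
        else
                printf("open of %s allowed\n", path);
        return 0;
}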
50357 diff -urNp linux-2.6.32.42/grsecurity/grsec_exec.c linux-2.6.32.42/grsecurity/grsec_exec.c
50358 --- linux-2.6.32.42/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
50359 +++ linux-2.6.32.42/grsecurity/grsec_exec.c 2011-04-17 15:56:46.000000000 -0400
50360 @@ -0,0 +1,148 @@
50361 +#include <linux/kernel.h>
50362 +#include <linux/sched.h>
50363 +#include <linux/file.h>
50364 +#include <linux/binfmts.h>
50365 +#include <linux/smp_lock.h>
50366 +#include <linux/fs.h>
50367 +#include <linux/types.h>
50368 +#include <linux/grdefs.h>
50369 +#include <linux/grinternal.h>
50370 +#include <linux/capability.h>
50371 +#include <linux/compat.h>
50372 +
50373 +#include <asm/uaccess.h>
50374 +
50375 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50376 +static char gr_exec_arg_buf[132];
50377 +static DEFINE_MUTEX(gr_exec_arg_mutex);
50378 +#endif
50379 +
50380 +int
50381 +gr_handle_nproc(void)
50382 +{
50383 +#ifdef CONFIG_GRKERNSEC_EXECVE
50384 + const struct cred *cred = current_cred();
50385 + if (grsec_enable_execve && cred->user &&
50386 + (atomic_read(&cred->user->processes) >
50387 + current->signal->rlim[RLIMIT_NPROC].rlim_cur) &&
50388 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
50389 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
50390 + return -EAGAIN;
50391 + }
50392 +#endif
50393 + return 0;
50394 +}
50395 +
50396 +void
50397 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
50398 +{
50399 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50400 + char *grarg = gr_exec_arg_buf;
50401 + unsigned int i, x, execlen = 0;
50402 + char c;
50403 +
50404 + if (!((grsec_enable_execlog && grsec_enable_group &&
50405 + in_group_p(grsec_audit_gid))
50406 + || (grsec_enable_execlog && !grsec_enable_group)))
50407 + return;
50408 +
50409 + mutex_lock(&gr_exec_arg_mutex);
50410 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
50411 +
50412 + if (unlikely(argv == NULL))
50413 + goto log;
50414 +
50415 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
50416 + const char __user *p;
50417 + unsigned int len;
50418 +
50419 + if (copy_from_user(&p, argv + i, sizeof(p)))
50420 + goto log;
50421 + if (!p)
50422 + goto log;
50423 + len = strnlen_user(p, 128 - execlen);
50424 + if (len > 128 - execlen)
50425 + len = 128 - execlen;
50426 + else if (len > 0)
50427 + len--;
50428 + if (copy_from_user(grarg + execlen, p, len))
50429 + goto log;
50430 +
50431 + /* rewrite unprintable characters */
50432 + for (x = 0; x < len; x++) {
50433 + c = *(grarg + execlen + x);
50434 + if (c < 32 || c > 126)
50435 + *(grarg + execlen + x) = ' ';
50436 + }
50437 +
50438 + execlen += len;
50439 + *(grarg + execlen) = ' ';
50440 + *(grarg + execlen + 1) = '\0';
50441 + execlen++;
50442 + }
50443 +
50444 + log:
50445 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50446 + bprm->file->f_path.mnt, grarg);
50447 + mutex_unlock(&gr_exec_arg_mutex);
50448 +#endif
50449 + return;
50450 +}
50451 +
50452 +#ifdef CONFIG_COMPAT
50453 +void
50454 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
50455 +{
50456 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50457 + char *grarg = gr_exec_arg_buf;
50458 + unsigned int i, x, execlen = 0;
50459 + char c;
50460 +
50461 + if (!((grsec_enable_execlog && grsec_enable_group &&
50462 + in_group_p(grsec_audit_gid))
50463 + || (grsec_enable_execlog && !grsec_enable_group)))
50464 + return;
50465 +
50466 + mutex_lock(&gr_exec_arg_mutex);
50467 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
50468 +
50469 + if (unlikely(argv == NULL))
50470 + goto log;
50471 +
50472 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
50473 + compat_uptr_t p;
50474 + unsigned int len;
50475 +
50476 + if (get_user(p, argv + i))
50477 + goto log;
50478 + len = strnlen_user(compat_ptr(p), 128 - execlen);
50479 + if (len > 128 - execlen)
50480 + len = 128 - execlen;
50481 + else if (len > 0)
50482 + len--;
50483 + else
50484 + goto log;
50485 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
50486 + goto log;
50487 +
50488 + /* rewrite unprintable characters */
50489 + for (x = 0; x < len; x++) {
50490 + c = *(grarg + execlen + x);
50491 + if (c < 32 || c > 126)
50492 + *(grarg + execlen + x) = ' ';
50493 + }
50494 +
50495 + execlen += len;
50496 + *(grarg + execlen) = ' ';
50497 + *(grarg + execlen + 1) = '\0';
50498 + execlen++;
50499 + }
50500 +
50501 + log:
50502 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
50503 + bprm->file->f_path.mnt, grarg);
50504 + mutex_unlock(&gr_exec_arg_mutex);
50505 +#endif
50506 + return;
50507 +}
50508 +#endif
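gr_handle_exec_args() (and its compat twin) flattens at most 128 bytes of the incoming argv into a fixed 132-byte buffer under a mutex, rewrites non-printable bytes as spaces, and hands the result to the audit logger. A runnable userspace sketch of just that flatten-and-sanitize step (illustrative only; the kernel version copies each pointer and string from user space with copy_from_user/strnlen_user):

#include <stdio.h>
#include <string.h>

static void flatten_args(char *dst, size_t dstsz, char *const argv[])
{
        size_t execlen = 0;
        memset(dst, 0, dstsz);
        for (int i = 0; argv[i] && execlen < 128; i++) {
                size_t len = strnlen(argv[i], 128 - execlen);
                memcpy(dst + execlen, argv[i], len);
                /* rewrite unprintable characters, as in the code above */
                for (size_t x = 0; x < len; x++)
                        if (dst[execlen + x] < 32 || dst[execlen + x] > 126)
                                dst[execlen + x] = ' ';
                execlen += len;
                dst[execlen++] = ' ';
                dst[execlen] = '\0';
        }
}

int main(void)
{
        char buf[132];
        char *argv[] = { "cat", "/etc/pass\twd", NULL };
        flatten_args(buf, sizeof(buf), argv);
        printf("exec audit payload: \"%s\"\n", buf);
        return 0;
}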
50509 diff -urNp linux-2.6.32.42/grsecurity/grsec_fifo.c linux-2.6.32.42/grsecurity/grsec_fifo.c
50510 --- linux-2.6.32.42/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
50511 +++ linux-2.6.32.42/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
50512 @@ -0,0 +1,24 @@
50513 +#include <linux/kernel.h>
50514 +#include <linux/sched.h>
50515 +#include <linux/fs.h>
50516 +#include <linux/file.h>
50517 +#include <linux/grinternal.h>
50518 +
50519 +int
50520 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
50521 + const struct dentry *dir, const int flag, const int acc_mode)
50522 +{
50523 +#ifdef CONFIG_GRKERNSEC_FIFO
50524 + const struct cred *cred = current_cred();
50525 +
50526 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
50527 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
50528 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
50529 + (cred->fsuid != dentry->d_inode->i_uid)) {
50530 + if (!inode_permission(dentry->d_inode, acc_mode))
50531 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
50532 + return -EACCES;
50533 + }
50534 +#endif
50535 + return 0;
50536 +}
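The FIFO rule above refuses (with -EACCES) to open a FIFO that sits in a sticky directory when the FIFO is owned by neither the directory owner nor the opener, skipping O_EXCL opens and logging only when the opener would otherwise have had permission. The core predicate, as a runnable sketch (illustrative only):

#include <stdio.h>
#include <sys/stat.h>

static int fifo_open_denied(mode_t dir_mode, uid_t dir_uid,
                            mode_t fifo_mode, uid_t fifo_uid, uid_t fsuid)
{
        return S_ISFIFO(fifo_mode) &&
               (dir_mode & S_ISVTX) &&
               fifo_uid != dir_uid &&
               fsuid != fifo_uid;
}

int main(void)
{
        /* /tmp-style directory (01777, root-owned), FIFO owned by another
         * unprivileged user, opened by a third user */
        printf("denied: %d\n", fifo_open_denied(S_IFDIR | 01777, 0,
                                                S_IFIFO | 0666, 1000, 1001));
        return 0;
}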
50537 diff -urNp linux-2.6.32.42/grsecurity/grsec_fork.c linux-2.6.32.42/grsecurity/grsec_fork.c
50538 --- linux-2.6.32.42/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
50539 +++ linux-2.6.32.42/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
50540 @@ -0,0 +1,23 @@
50541 +#include <linux/kernel.h>
50542 +#include <linux/sched.h>
50543 +#include <linux/grsecurity.h>
50544 +#include <linux/grinternal.h>
50545 +#include <linux/errno.h>
50546 +
50547 +void
50548 +gr_log_forkfail(const int retval)
50549 +{
50550 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
50551 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
50552 + switch (retval) {
50553 + case -EAGAIN:
50554 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
50555 + break;
50556 + case -ENOMEM:
50557 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
50558 + break;
50559 + }
50560 + }
50561 +#endif
50562 + return;
50563 +}
50564 diff -urNp linux-2.6.32.42/grsecurity/grsec_init.c linux-2.6.32.42/grsecurity/grsec_init.c
50565 --- linux-2.6.32.42/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
50566 +++ linux-2.6.32.42/grsecurity/grsec_init.c 2011-04-17 15:56:46.000000000 -0400
50567 @@ -0,0 +1,270 @@
50568 +#include <linux/kernel.h>
50569 +#include <linux/sched.h>
50570 +#include <linux/mm.h>
50571 +#include <linux/smp_lock.h>
50572 +#include <linux/gracl.h>
50573 +#include <linux/slab.h>
50574 +#include <linux/vmalloc.h>
50575 +#include <linux/percpu.h>
50576 +#include <linux/module.h>
50577 +
50578 +int grsec_enable_link;
50579 +int grsec_enable_dmesg;
50580 +int grsec_enable_harden_ptrace;
50581 +int grsec_enable_fifo;
50582 +int grsec_enable_execve;
50583 +int grsec_enable_execlog;
50584 +int grsec_enable_signal;
50585 +int grsec_enable_forkfail;
50586 +int grsec_enable_audit_ptrace;
50587 +int grsec_enable_time;
50588 +int grsec_enable_audit_textrel;
50589 +int grsec_enable_group;
50590 +int grsec_audit_gid;
50591 +int grsec_enable_chdir;
50592 +int grsec_enable_mount;
50593 +int grsec_enable_rofs;
50594 +int grsec_enable_chroot_findtask;
50595 +int grsec_enable_chroot_mount;
50596 +int grsec_enable_chroot_shmat;
50597 +int grsec_enable_chroot_fchdir;
50598 +int grsec_enable_chroot_double;
50599 +int grsec_enable_chroot_pivot;
50600 +int grsec_enable_chroot_chdir;
50601 +int grsec_enable_chroot_chmod;
50602 +int grsec_enable_chroot_mknod;
50603 +int grsec_enable_chroot_nice;
50604 +int grsec_enable_chroot_execlog;
50605 +int grsec_enable_chroot_caps;
50606 +int grsec_enable_chroot_sysctl;
50607 +int grsec_enable_chroot_unix;
50608 +int grsec_enable_tpe;
50609 +int grsec_tpe_gid;
50610 +int grsec_enable_blackhole;
50611 +#ifdef CONFIG_IPV6_MODULE
50612 +EXPORT_SYMBOL(grsec_enable_blackhole);
50613 +#endif
50614 +int grsec_lastack_retries;
50615 +int grsec_enable_tpe_all;
50616 +int grsec_enable_tpe_invert;
50617 +int grsec_enable_socket_all;
50618 +int grsec_socket_all_gid;
50619 +int grsec_enable_socket_client;
50620 +int grsec_socket_client_gid;
50621 +int grsec_enable_socket_server;
50622 +int grsec_socket_server_gid;
50623 +int grsec_resource_logging;
50624 +int grsec_disable_privio;
50625 +int grsec_enable_log_rwxmaps;
50626 +int grsec_lock;
50627 +
50628 +DEFINE_SPINLOCK(grsec_alert_lock);
50629 +unsigned long grsec_alert_wtime = 0;
50630 +unsigned long grsec_alert_fyet = 0;
50631 +
50632 +DEFINE_SPINLOCK(grsec_audit_lock);
50633 +
50634 +DEFINE_RWLOCK(grsec_exec_file_lock);
50635 +
50636 +char *gr_shared_page[4];
50637 +
50638 +char *gr_alert_log_fmt;
50639 +char *gr_audit_log_fmt;
50640 +char *gr_alert_log_buf;
50641 +char *gr_audit_log_buf;
50642 +
50643 +extern struct gr_arg *gr_usermode;
50644 +extern unsigned char *gr_system_salt;
50645 +extern unsigned char *gr_system_sum;
50646 +
50647 +void __init
50648 +grsecurity_init(void)
50649 +{
50650 + int j;
50651 + /* create the per-cpu shared pages */
50652 +
50653 +#ifdef CONFIG_X86
50654 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
50655 +#endif
50656 +
50657 + for (j = 0; j < 4; j++) {
50658 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
50659 + if (gr_shared_page[j] == NULL) {
50660 + panic("Unable to allocate grsecurity shared page");
50661 + return;
50662 + }
50663 + }
50664 +
50665 + /* allocate log buffers */
50666 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
50667 + if (!gr_alert_log_fmt) {
50668 + panic("Unable to allocate grsecurity alert log format buffer");
50669 + return;
50670 + }
50671 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
50672 + if (!gr_audit_log_fmt) {
50673 + panic("Unable to allocate grsecurity audit log format buffer");
50674 + return;
50675 + }
50676 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50677 + if (!gr_alert_log_buf) {
50678 + panic("Unable to allocate grsecurity alert log buffer");
50679 + return;
50680 + }
50681 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
50682 + if (!gr_audit_log_buf) {
50683 + panic("Unable to allocate grsecurity audit log buffer");
50684 + return;
50685 + }
50686 +
50687 + /* allocate memory for authentication structure */
50688 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
50689 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
50690 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
50691 +
50692 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
50693 + panic("Unable to allocate grsecurity authentication structure");
50694 + return;
50695 + }
50696 +
50697 +
50698 +#ifdef CONFIG_GRKERNSEC_IO
50699 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
50700 + grsec_disable_privio = 1;
50701 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50702 + grsec_disable_privio = 1;
50703 +#else
50704 + grsec_disable_privio = 0;
50705 +#endif
50706 +#endif
50707 +
50708 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
50709 + /* for backward compatibility, tpe_invert always defaults to on if
50710 + enabled in the kernel
50711 + */
50712 + grsec_enable_tpe_invert = 1;
50713 +#endif
50714 +
50715 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
50716 +#ifndef CONFIG_GRKERNSEC_SYSCTL
50717 + grsec_lock = 1;
50718 +#endif
50719 +
50720 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
50721 + grsec_enable_audit_textrel = 1;
50722 +#endif
50723 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
50724 + grsec_enable_log_rwxmaps = 1;
50725 +#endif
50726 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
50727 + grsec_enable_group = 1;
50728 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
50729 +#endif
50730 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
50731 + grsec_enable_chdir = 1;
50732 +#endif
50733 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
50734 + grsec_enable_harden_ptrace = 1;
50735 +#endif
50736 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
50737 + grsec_enable_mount = 1;
50738 +#endif
50739 +#ifdef CONFIG_GRKERNSEC_LINK
50740 + grsec_enable_link = 1;
50741 +#endif
50742 +#ifdef CONFIG_GRKERNSEC_DMESG
50743 + grsec_enable_dmesg = 1;
50744 +#endif
50745 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
50746 + grsec_enable_blackhole = 1;
50747 + grsec_lastack_retries = 4;
50748 +#endif
50749 +#ifdef CONFIG_GRKERNSEC_FIFO
50750 + grsec_enable_fifo = 1;
50751 +#endif
50752 +#ifdef CONFIG_GRKERNSEC_EXECVE
50753 + grsec_enable_execve = 1;
50754 +#endif
50755 +#ifdef CONFIG_GRKERNSEC_EXECLOG
50756 + grsec_enable_execlog = 1;
50757 +#endif
50758 +#ifdef CONFIG_GRKERNSEC_SIGNAL
50759 + grsec_enable_signal = 1;
50760 +#endif
50761 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
50762 + grsec_enable_forkfail = 1;
50763 +#endif
50764 +#ifdef CONFIG_GRKERNSEC_TIME
50765 + grsec_enable_time = 1;
50766 +#endif
50767 +#ifdef CONFIG_GRKERNSEC_RESLOG
50768 + grsec_resource_logging = 1;
50769 +#endif
50770 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50771 + grsec_enable_chroot_findtask = 1;
50772 +#endif
50773 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
50774 + grsec_enable_chroot_unix = 1;
50775 +#endif
50776 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50777 + grsec_enable_chroot_mount = 1;
50778 +#endif
50779 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
50780 + grsec_enable_chroot_fchdir = 1;
50781 +#endif
50782 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50783 + grsec_enable_chroot_shmat = 1;
50784 +#endif
50785 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
50786 + grsec_enable_audit_ptrace = 1;
50787 +#endif
50788 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50789 + grsec_enable_chroot_double = 1;
50790 +#endif
50791 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50792 + grsec_enable_chroot_pivot = 1;
50793 +#endif
50794 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50795 + grsec_enable_chroot_chdir = 1;
50796 +#endif
50797 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50798 + grsec_enable_chroot_chmod = 1;
50799 +#endif
50800 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50801 + grsec_enable_chroot_mknod = 1;
50802 +#endif
50803 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50804 + grsec_enable_chroot_nice = 1;
50805 +#endif
50806 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
50807 + grsec_enable_chroot_execlog = 1;
50808 +#endif
50809 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50810 + grsec_enable_chroot_caps = 1;
50811 +#endif
50812 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50813 + grsec_enable_chroot_sysctl = 1;
50814 +#endif
50815 +#ifdef CONFIG_GRKERNSEC_TPE
50816 + grsec_enable_tpe = 1;
50817 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
50818 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
50819 + grsec_enable_tpe_all = 1;
50820 +#endif
50821 +#endif
50822 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
50823 + grsec_enable_socket_all = 1;
50824 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
50825 +#endif
50826 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
50827 + grsec_enable_socket_client = 1;
50828 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
50829 +#endif
50830 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
50831 + grsec_enable_socket_server = 1;
50832 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
50833 +#endif
50834 +#endif
50835 +
50836 + return;
50837 +}
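grsecurity_init() allocates four PAGE_SIZE per-CPU scratch buffers (gr_shared_page[0..3]) plus the alert/audit format and output buffers, then seeds every grsec_enable_* toggle from its Kconfig option; without CONFIG_GRKERNSEC_SYSCTL, grsec_lock is set so the values stay fixed at run time. The scratch pages are presumably consumed as allocation-free path/logging buffers elsewhere in the patch. A userspace analogue of that allocate-once-per-CPU setup (illustrative only; the kernel uses __alloc_percpu() and panics on failure):

#include <stdio.h>
#include <stdlib.h>

#define NCPUS     4       /* stand-in for the number of possible CPUs */
#define PAGE_SIZE 4096

static char *shared_page[4][NCPUS];

static void init_shared_pages(void)
{
        for (int j = 0; j < 4; j++)
                for (int cpu = 0; cpu < NCPUS; cpu++) {
                        shared_page[j][cpu] = calloc(1, PAGE_SIZE);
                        if (!shared_page[j][cpu]) {
                                fprintf(stderr, "unable to allocate shared page\n");
                                exit(1);
                        }
                }
}

int main(void)
{
        init_shared_pages();
        printf("allocated %d scratch pages\n", 4 * NCPUS);
        return 0;
}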
50838 diff -urNp linux-2.6.32.42/grsecurity/grsec_link.c linux-2.6.32.42/grsecurity/grsec_link.c
50839 --- linux-2.6.32.42/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
50840 +++ linux-2.6.32.42/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
50841 @@ -0,0 +1,43 @@
50842 +#include <linux/kernel.h>
50843 +#include <linux/sched.h>
50844 +#include <linux/fs.h>
50845 +#include <linux/file.h>
50846 +#include <linux/grinternal.h>
50847 +
50848 +int
50849 +gr_handle_follow_link(const struct inode *parent,
50850 + const struct inode *inode,
50851 + const struct dentry *dentry, const struct vfsmount *mnt)
50852 +{
50853 +#ifdef CONFIG_GRKERNSEC_LINK
50854 + const struct cred *cred = current_cred();
50855 +
50856 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
50857 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
50858 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
50859 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
50860 + return -EACCES;
50861 + }
50862 +#endif
50863 + return 0;
50864 +}
50865 +
50866 +int
50867 +gr_handle_hardlink(const struct dentry *dentry,
50868 + const struct vfsmount *mnt,
50869 + struct inode *inode, const int mode, const char *to)
50870 +{
50871 +#ifdef CONFIG_GRKERNSEC_LINK
50872 + const struct cred *cred = current_cred();
50873 +
50874 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
50875 + (!S_ISREG(mode) || (mode & S_ISUID) ||
50876 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
50877 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
50878 + !capable(CAP_FOWNER) && cred->uid) {
50879 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
50880 + return -EPERM;
50881 + }
50882 +#endif
50883 + return 0;
50884 +}
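gr_handle_follow_link() blocks following a symlink that sits in a sticky, world-writable directory when the link is owned by neither the directory owner nor the follower, and gr_handle_hardlink() refuses hardlinks to files the caller does not own unless the target is a plain, non-setid file the caller could already read and write (CAP_FOWNER and uid 0 bypass) — essentially the restrictions later adopted in mainline as fs.protected_symlinks / fs.protected_hardlinks. The symlink predicate as a runnable sketch (illustrative only):

#include <stdio.h>
#include <sys/stat.h>

static int follow_link_denied(mode_t parent_mode, uid_t parent_uid,
                              mode_t link_mode, uid_t link_uid, uid_t fsuid)
{
        return S_ISLNK(link_mode) &&
               (parent_mode & S_ISVTX) &&
               (parent_mode & S_IWOTH) &&
               parent_uid != link_uid &&
               fsuid != link_uid;
}

int main(void)
{
        /* attacker (uid 1000) plants a link in /tmp (root-owned, 01777);
         * a root-owned process tries to follow it */
        printf("denied: %d\n", follow_link_denied(S_IFDIR | 01777, 0,
                                                  S_IFLNK | 0777, 1000, 0));
        return 0;
}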
50885 diff -urNp linux-2.6.32.42/grsecurity/grsec_log.c linux-2.6.32.42/grsecurity/grsec_log.c
50886 --- linux-2.6.32.42/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
50887 +++ linux-2.6.32.42/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
50888 @@ -0,0 +1,310 @@
50889 +#include <linux/kernel.h>
50890 +#include <linux/sched.h>
50891 +#include <linux/file.h>
50892 +#include <linux/tty.h>
50893 +#include <linux/fs.h>
50894 +#include <linux/grinternal.h>
50895 +
50896 +#ifdef CONFIG_TREE_PREEMPT_RCU
50897 +#define DISABLE_PREEMPT() preempt_disable()
50898 +#define ENABLE_PREEMPT() preempt_enable()
50899 +#else
50900 +#define DISABLE_PREEMPT()
50901 +#define ENABLE_PREEMPT()
50902 +#endif
50903 +
50904 +#define BEGIN_LOCKS(x) \
50905 + DISABLE_PREEMPT(); \
50906 + rcu_read_lock(); \
50907 + read_lock(&tasklist_lock); \
50908 + read_lock(&grsec_exec_file_lock); \
50909 + if (x != GR_DO_AUDIT) \
50910 + spin_lock(&grsec_alert_lock); \
50911 + else \
50912 + spin_lock(&grsec_audit_lock)
50913 +
50914 +#define END_LOCKS(x) \
50915 + if (x != GR_DO_AUDIT) \
50916 + spin_unlock(&grsec_alert_lock); \
50917 + else \
50918 + spin_unlock(&grsec_audit_lock); \
50919 + read_unlock(&grsec_exec_file_lock); \
50920 + read_unlock(&tasklist_lock); \
50921 + rcu_read_unlock(); \
50922 + ENABLE_PREEMPT(); \
50923 + if (x == GR_DONT_AUDIT) \
50924 + gr_handle_alertkill(current)
50925 +
50926 +enum {
50927 + FLOODING,
50928 + NO_FLOODING
50929 +};
50930 +
50931 +extern char *gr_alert_log_fmt;
50932 +extern char *gr_audit_log_fmt;
50933 +extern char *gr_alert_log_buf;
50934 +extern char *gr_audit_log_buf;
50935 +
50936 +static int gr_log_start(int audit)
50937 +{
50938 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
50939 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
50940 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50941 +
50942 + if (audit == GR_DO_AUDIT)
50943 + goto set_fmt;
50944 +
50945 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
50946 + grsec_alert_wtime = jiffies;
50947 + grsec_alert_fyet = 0;
50948 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
50949 + grsec_alert_fyet++;
50950 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
50951 + grsec_alert_wtime = jiffies;
50952 + grsec_alert_fyet++;
50953 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
50954 + return FLOODING;
50955 + } else return FLOODING;
50956 +
50957 +set_fmt:
50958 + memset(buf, 0, PAGE_SIZE);
50959 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
50960 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
50961 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
50962 + } else if (current->signal->curr_ip) {
50963 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
50964 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
50965 + } else if (gr_acl_is_enabled()) {
50966 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
50967 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
50968 + } else {
50969 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
50970 + strcpy(buf, fmt);
50971 + }
50972 +
50973 + return NO_FLOODING;
50974 +}
50975 +
50976 +static void gr_log_middle(int audit, const char *msg, va_list ap)
50977 + __attribute__ ((format (printf, 2, 0)));
50978 +
50979 +static void gr_log_middle(int audit, const char *msg, va_list ap)
50980 +{
50981 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50982 + unsigned int len = strlen(buf);
50983 +
50984 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
50985 +
50986 + return;
50987 +}
50988 +
50989 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
50990 + __attribute__ ((format (printf, 2, 3)));
50991 +
50992 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
50993 +{
50994 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
50995 + unsigned int len = strlen(buf);
50996 + va_list ap;
50997 +
50998 + va_start(ap, msg);
50999 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51000 + va_end(ap);
51001 +
51002 + return;
51003 +}
51004 +
51005 +static void gr_log_end(int audit)
51006 +{
51007 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51008 + unsigned int len = strlen(buf);
51009 +
51010 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
51011 + printk("%s\n", buf);
51012 +
51013 + return;
51014 +}
51015 +
51016 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
51017 +{
51018 + int logtype;
51019 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
51020 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
51021 + void *voidptr = NULL;
51022 + int num1 = 0, num2 = 0;
51023 + unsigned long ulong1 = 0, ulong2 = 0;
51024 + struct dentry *dentry = NULL;
51025 + struct vfsmount *mnt = NULL;
51026 + struct file *file = NULL;
51027 + struct task_struct *task = NULL;
51028 + const struct cred *cred, *pcred;
51029 + va_list ap;
51030 +
51031 + BEGIN_LOCKS(audit);
51032 + logtype = gr_log_start(audit);
51033 + if (logtype == FLOODING) {
51034 + END_LOCKS(audit);
51035 + return;
51036 + }
51037 + va_start(ap, argtypes);
51038 + switch (argtypes) {
51039 + case GR_TTYSNIFF:
51040 + task = va_arg(ap, struct task_struct *);
51041 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
51042 + break;
51043 + case GR_SYSCTL_HIDDEN:
51044 + str1 = va_arg(ap, char *);
51045 + gr_log_middle_varargs(audit, msg, result, str1);
51046 + break;
51047 + case GR_RBAC:
51048 + dentry = va_arg(ap, struct dentry *);
51049 + mnt = va_arg(ap, struct vfsmount *);
51050 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
51051 + break;
51052 + case GR_RBAC_STR:
51053 + dentry = va_arg(ap, struct dentry *);
51054 + mnt = va_arg(ap, struct vfsmount *);
51055 + str1 = va_arg(ap, char *);
51056 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
51057 + break;
51058 + case GR_STR_RBAC:
51059 + str1 = va_arg(ap, char *);
51060 + dentry = va_arg(ap, struct dentry *);
51061 + mnt = va_arg(ap, struct vfsmount *);
51062 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
51063 + break;
51064 + case GR_RBAC_MODE2:
51065 + dentry = va_arg(ap, struct dentry *);
51066 + mnt = va_arg(ap, struct vfsmount *);
51067 + str1 = va_arg(ap, char *);
51068 + str2 = va_arg(ap, char *);
51069 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
51070 + break;
51071 + case GR_RBAC_MODE3:
51072 + dentry = va_arg(ap, struct dentry *);
51073 + mnt = va_arg(ap, struct vfsmount *);
51074 + str1 = va_arg(ap, char *);
51075 + str2 = va_arg(ap, char *);
51076 + str3 = va_arg(ap, char *);
51077 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
51078 + break;
51079 + case GR_FILENAME:
51080 + dentry = va_arg(ap, struct dentry *);
51081 + mnt = va_arg(ap, struct vfsmount *);
51082 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
51083 + break;
51084 + case GR_STR_FILENAME:
51085 + str1 = va_arg(ap, char *);
51086 + dentry = va_arg(ap, struct dentry *);
51087 + mnt = va_arg(ap, struct vfsmount *);
51088 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
51089 + break;
51090 + case GR_FILENAME_STR:
51091 + dentry = va_arg(ap, struct dentry *);
51092 + mnt = va_arg(ap, struct vfsmount *);
51093 + str1 = va_arg(ap, char *);
51094 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
51095 + break;
51096 + case GR_FILENAME_TWO_INT:
51097 + dentry = va_arg(ap, struct dentry *);
51098 + mnt = va_arg(ap, struct vfsmount *);
51099 + num1 = va_arg(ap, int);
51100 + num2 = va_arg(ap, int);
51101 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
51102 + break;
51103 + case GR_FILENAME_TWO_INT_STR:
51104 + dentry = va_arg(ap, struct dentry *);
51105 + mnt = va_arg(ap, struct vfsmount *);
51106 + num1 = va_arg(ap, int);
51107 + num2 = va_arg(ap, int);
51108 + str1 = va_arg(ap, char *);
51109 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
51110 + break;
51111 + case GR_TEXTREL:
51112 + file = va_arg(ap, struct file *);
51113 + ulong1 = va_arg(ap, unsigned long);
51114 + ulong2 = va_arg(ap, unsigned long);
51115 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
51116 + break;
51117 + case GR_PTRACE:
51118 + task = va_arg(ap, struct task_struct *);
51119 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
51120 + break;
51121 + case GR_RESOURCE:
51122 + task = va_arg(ap, struct task_struct *);
51123 + cred = __task_cred(task);
51124 + pcred = __task_cred(task->real_parent);
51125 + ulong1 = va_arg(ap, unsigned long);
51126 + str1 = va_arg(ap, char *);
51127 + ulong2 = va_arg(ap, unsigned long);
51128 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51129 + break;
51130 + case GR_CAP:
51131 + task = va_arg(ap, struct task_struct *);
51132 + cred = __task_cred(task);
51133 + pcred = __task_cred(task->real_parent);
51134 + str1 = va_arg(ap, char *);
51135 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51136 + break;
51137 + case GR_SIG:
51138 + str1 = va_arg(ap, char *);
51139 + voidptr = va_arg(ap, void *);
51140 + gr_log_middle_varargs(audit, msg, str1, voidptr);
51141 + break;
51142 + case GR_SIG2:
51143 + task = va_arg(ap, struct task_struct *);
51144 + cred = __task_cred(task);
51145 + pcred = __task_cred(task->real_parent);
51146 + num1 = va_arg(ap, int);
51147 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51148 + break;
51149 + case GR_CRASH1:
51150 + task = va_arg(ap, struct task_struct *);
51151 + cred = __task_cred(task);
51152 + pcred = __task_cred(task->real_parent);
51153 + ulong1 = va_arg(ap, unsigned long);
51154 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
51155 + break;
51156 + case GR_CRASH2:
51157 + task = va_arg(ap, struct task_struct *);
51158 + cred = __task_cred(task);
51159 + pcred = __task_cred(task->real_parent);
51160 + ulong1 = va_arg(ap, unsigned long);
51161 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
51162 + break;
51163 + case GR_RWXMAP:
51164 + file = va_arg(ap, struct file *);
51165 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
51166 + break;
51167 + case GR_PSACCT:
51168 + {
51169 + unsigned int wday, cday;
51170 + __u8 whr, chr;
51171 + __u8 wmin, cmin;
51172 + __u8 wsec, csec;
51173 + char cur_tty[64] = { 0 };
51174 + char parent_tty[64] = { 0 };
51175 +
51176 + task = va_arg(ap, struct task_struct *);
51177 + wday = va_arg(ap, unsigned int);
51178 + cday = va_arg(ap, unsigned int);
51179 + whr = va_arg(ap, int);
51180 + chr = va_arg(ap, int);
51181 + wmin = va_arg(ap, int);
51182 + cmin = va_arg(ap, int);
51183 + wsec = va_arg(ap, int);
51184 + csec = va_arg(ap, int);
51185 + ulong1 = va_arg(ap, unsigned long);
51186 + cred = __task_cred(task);
51187 + pcred = __task_cred(task->real_parent);
51188 +
51189 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51190 + }
51191 + break;
51192 + default:
51193 + gr_log_middle(audit, msg, ap);
51194 + }
51195 + va_end(ap);
51196 + gr_log_end(audit);
51197 + END_LOCKS(audit);
51198 +}
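gr_log_varargs() assembles each message in three stages under the BEGIN_LOCKS/END_LOCKS bracket: gr_log_start() writes the role/IP prefix and applies flood control, a hook-specific middle is formatted from the va_list, and gr_log_end() appends the common task/parent suffix before printk. The flood control lets alerts through until CONFIG_GRKERNSEC_FLOODBURST of them land inside a CONFIG_GRKERNSEC_FLOODTIME-second window, then prints one "logging disabled" notice and suppresses further alerts for the next window; audit-class messages bypass it. A runnable sketch of that window logic (illustrative, with stand-in constants):

#include <stdio.h>

#define FLOODTIME  10   /* seconds, stand-in for CONFIG_GRKERNSEC_FLOODTIME */
#define FLOODBURST 4    /* stand-in for CONFIG_GRKERNSEC_FLOODBURST */

static unsigned long alert_wtime;   /* start of the current window */
static unsigned long alert_fyet;    /* alerts seen in that window */

/* Mirrors the decision in gr_log_start(): 1 = emit the alert, 0 = flooding. */
static int alert_allowed(unsigned long now)
{
        if (!alert_wtime || now - alert_wtime > FLOODTIME) {
                alert_wtime = now;
                alert_fyet = 0;
                return 1;
        }
        if (alert_fyet < FLOODBURST) {
                alert_fyet++;
                return 1;
        }
        if (alert_fyet == FLOODBURST) {
                alert_wtime = now;
                alert_fyet++;
                printf("more alerts, logging disabled for %d seconds\n", FLOODTIME);
        }
        return 0;
}

int main(void)
{
        for (unsigned long t = 1; t <= 8; t++)
                printf("t=%lu allowed=%d\n", t, alert_allowed(t));
        return 0;
}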
51199 diff -urNp linux-2.6.32.42/grsecurity/grsec_mem.c linux-2.6.32.42/grsecurity/grsec_mem.c
51200 --- linux-2.6.32.42/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
51201 +++ linux-2.6.32.42/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
51202 @@ -0,0 +1,33 @@
51203 +#include <linux/kernel.h>
51204 +#include <linux/sched.h>
51205 +#include <linux/mm.h>
51206 +#include <linux/mman.h>
51207 +#include <linux/grinternal.h>
51208 +
51209 +void
51210 +gr_handle_ioperm(void)
51211 +{
51212 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
51213 + return;
51214 +}
51215 +
51216 +void
51217 +gr_handle_iopl(void)
51218 +{
51219 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
51220 + return;
51221 +}
51222 +
51223 +void
51224 +gr_handle_mem_readwrite(u64 from, u64 to)
51225 +{
51226 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
51227 + return;
51228 +}
51229 +
51230 +void
51231 +gr_handle_vm86(void)
51232 +{
51233 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
51234 + return;
51235 +}
51236 diff -urNp linux-2.6.32.42/grsecurity/grsec_mount.c linux-2.6.32.42/grsecurity/grsec_mount.c
51237 --- linux-2.6.32.42/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
51238 +++ linux-2.6.32.42/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
51239 @@ -0,0 +1,62 @@
51240 +#include <linux/kernel.h>
51241 +#include <linux/sched.h>
51242 +#include <linux/mount.h>
51243 +#include <linux/grsecurity.h>
51244 +#include <linux/grinternal.h>
51245 +
51246 +void
51247 +gr_log_remount(const char *devname, const int retval)
51248 +{
51249 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51250 + if (grsec_enable_mount && (retval >= 0))
51251 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
51252 +#endif
51253 + return;
51254 +}
51255 +
51256 +void
51257 +gr_log_unmount(const char *devname, const int retval)
51258 +{
51259 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51260 + if (grsec_enable_mount && (retval >= 0))
51261 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
51262 +#endif
51263 + return;
51264 +}
51265 +
51266 +void
51267 +gr_log_mount(const char *from, const char *to, const int retval)
51268 +{
51269 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51270 + if (grsec_enable_mount && (retval >= 0))
51271 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
51272 +#endif
51273 + return;
51274 +}
51275 +
51276 +int
51277 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
51278 +{
51279 +#ifdef CONFIG_GRKERNSEC_ROFS
51280 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
51281 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
51282 + return -EPERM;
51283 + } else
51284 + return 0;
51285 +#endif
51286 + return 0;
51287 +}
51288 +
51289 +int
51290 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
51291 +{
51292 +#ifdef CONFIG_GRKERNSEC_ROFS
51293 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
51294 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
51295 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
51296 + return -EPERM;
51297 + } else
51298 + return 0;
51299 +#endif
51300 + return 0;
51301 +}
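With CONFIG_GRKERNSEC_ROFS compiled in and the grsec_enable_rofs toggle set, any new mount that is not read-only and any write-mode open of a block device is rejected with -EPERM and audited, freezing the set of writable filesystems at run time. The mount-side predicate, sketched (illustrative only; the MNT_READONLY value is assumed from the kernel's mount.h):

#include <stdio.h>

#define MNT_READONLY 0x40   /* assumed value, for illustration */

static int rofs_mount_denied(int rofs_enabled, int mnt_flags)
{
        return rofs_enabled && !(mnt_flags & MNT_READONLY);
}

int main(void)
{
        printf("rw mount denied: %d\n", rofs_mount_denied(1, 0));
        printf("ro mount denied: %d\n", rofs_mount_denied(1, MNT_READONLY));
        return 0;
}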
51302 diff -urNp linux-2.6.32.42/grsecurity/grsec_pax.c linux-2.6.32.42/grsecurity/grsec_pax.c
51303 --- linux-2.6.32.42/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
51304 +++ linux-2.6.32.42/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
51305 @@ -0,0 +1,36 @@
51306 +#include <linux/kernel.h>
51307 +#include <linux/sched.h>
51308 +#include <linux/mm.h>
51309 +#include <linux/file.h>
51310 +#include <linux/grinternal.h>
51311 +#include <linux/grsecurity.h>
51312 +
51313 +void
51314 +gr_log_textrel(struct vm_area_struct * vma)
51315 +{
51316 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51317 + if (grsec_enable_audit_textrel)
51318 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
51319 +#endif
51320 + return;
51321 +}
51322 +
51323 +void
51324 +gr_log_rwxmmap(struct file *file)
51325 +{
51326 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51327 + if (grsec_enable_log_rwxmaps)
51328 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
51329 +#endif
51330 + return;
51331 +}
51332 +
51333 +void
51334 +gr_log_rwxmprotect(struct file *file)
51335 +{
51336 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51337 + if (grsec_enable_log_rwxmaps)
51338 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
51339 +#endif
51340 + return;
51341 +}
51342 diff -urNp linux-2.6.32.42/grsecurity/grsec_ptrace.c linux-2.6.32.42/grsecurity/grsec_ptrace.c
51343 --- linux-2.6.32.42/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
51344 +++ linux-2.6.32.42/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
51345 @@ -0,0 +1,14 @@
51346 +#include <linux/kernel.h>
51347 +#include <linux/sched.h>
51348 +#include <linux/grinternal.h>
51349 +#include <linux/grsecurity.h>
51350 +
51351 +void
51352 +gr_audit_ptrace(struct task_struct *task)
51353 +{
51354 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51355 + if (grsec_enable_audit_ptrace)
51356 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
51357 +#endif
51358 + return;
51359 +}
51360 diff -urNp linux-2.6.32.42/grsecurity/grsec_sig.c linux-2.6.32.42/grsecurity/grsec_sig.c
51361 --- linux-2.6.32.42/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
51362 +++ linux-2.6.32.42/grsecurity/grsec_sig.c 2011-05-17 17:30:04.000000000 -0400
51363 @@ -0,0 +1,202 @@
51364 +#include <linux/kernel.h>
51365 +#include <linux/sched.h>
51366 +#include <linux/delay.h>
51367 +#include <linux/grsecurity.h>
51368 +#include <linux/grinternal.h>
51369 +#include <linux/hardirq.h>
51370 +
51371 +char *signames[] = {
51372 + [SIGSEGV] = "Segmentation fault",
51373 + [SIGILL] = "Illegal instruction",
51374 + [SIGABRT] = "Abort",
51375 + [SIGBUS] = "Invalid alignment/Bus error"
51376 +};
51377 +
51378 +void
51379 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
51380 +{
51381 +#ifdef CONFIG_GRKERNSEC_SIGNAL
51382 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
51383 + (sig == SIGABRT) || (sig == SIGBUS))) {
51384 + if (t->pid == current->pid) {
51385 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
51386 + } else {
51387 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
51388 + }
51389 + }
51390 +#endif
51391 + return;
51392 +}
51393 +
51394 +int
51395 +gr_handle_signal(const struct task_struct *p, const int sig)
51396 +{
51397 +#ifdef CONFIG_GRKERNSEC
51398 + if (current->pid > 1 && gr_check_protected_task(p)) {
51399 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
51400 + return -EPERM;
51401 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
51402 + return -EPERM;
51403 + }
51404 +#endif
51405 + return 0;
51406 +}
51407 +
51408 +#ifdef CONFIG_GRKERNSEC
51409 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
51410 +
51411 +int gr_fake_force_sig(int sig, struct task_struct *t)
51412 +{
51413 + unsigned long int flags;
51414 + int ret, blocked, ignored;
51415 + struct k_sigaction *action;
51416 +
51417 + spin_lock_irqsave(&t->sighand->siglock, flags);
51418 + action = &t->sighand->action[sig-1];
51419 + ignored = action->sa.sa_handler == SIG_IGN;
51420 + blocked = sigismember(&t->blocked, sig);
51421 + if (blocked || ignored) {
51422 + action->sa.sa_handler = SIG_DFL;
51423 + if (blocked) {
51424 + sigdelset(&t->blocked, sig);
51425 + recalc_sigpending_and_wake(t);
51426 + }
51427 + }
51428 + if (action->sa.sa_handler == SIG_DFL)
51429 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
51430 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
51431 +
51432 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
51433 +
51434 + return ret;
51435 +}
51436 +#endif
51437 +
51438 +#ifdef CONFIG_GRKERNSEC_BRUTE
51439 +#define GR_USER_BAN_TIME (15 * 60)
51440 +
51441 +static int __get_dumpable(unsigned long mm_flags)
51442 +{
51443 + int ret;
51444 +
51445 + ret = mm_flags & MMF_DUMPABLE_MASK;
51446 + return (ret >= 2) ? 2 : ret;
51447 +}
51448 +#endif
51449 +
51450 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
51451 +{
51452 +#ifdef CONFIG_GRKERNSEC_BRUTE
51453 + uid_t uid = 0;
51454 +
51455 + rcu_read_lock();
51456 + read_lock(&tasklist_lock);
51457 + read_lock(&grsec_exec_file_lock);
51458 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
51459 + p->real_parent->brute = 1;
51460 + else {
51461 + const struct cred *cred = __task_cred(p), *cred2;
51462 + struct task_struct *tsk, *tsk2;
51463 +
51464 + if (!__get_dumpable(mm_flags) && cred->uid) {
51465 + struct user_struct *user;
51466 +
51467 + uid = cred->uid;
51468 +
51469 + /* this is put upon execution past expiration */
51470 + user = find_user(uid);
51471 + if (user == NULL)
51472 + goto unlock;
51473 + user->banned = 1;
51474 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
51475 + if (user->ban_expires == ~0UL)
51476 + user->ban_expires--;
51477 +
51478 + do_each_thread(tsk2, tsk) {
51479 + cred2 = __task_cred(tsk);
51480 + if (tsk != p && cred2->uid == uid)
51481 + gr_fake_force_sig(SIGKILL, tsk);
51482 + } while_each_thread(tsk2, tsk);
51483 + }
51484 + }
51485 +unlock:
51486 + read_unlock(&grsec_exec_file_lock);
51487 + read_unlock(&tasklist_lock);
51488 + rcu_read_unlock();
51489 +
51490 + if (uid)
51491 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
51492 +#endif
51493 + return;
51494 +}
51495 +
51496 +void gr_handle_brute_check(void)
51497 +{
51498 +#ifdef CONFIG_GRKERNSEC_BRUTE
51499 + if (current->brute)
51500 + msleep(30 * 1000);
51501 +#endif
51502 + return;
51503 +}
51504 +
51505 +void gr_handle_kernel_exploit(void)
51506 +{
51507 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
51508 + const struct cred *cred;
51509 + struct task_struct *tsk, *tsk2;
51510 + struct user_struct *user;
51511 + uid_t uid;
51512 +
51513 + if (in_irq() || in_serving_softirq() || in_nmi())
51514 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
51515 +
51516 + uid = current_uid();
51517 +
51518 + if (uid == 0)
51519 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
51520 + else {
51521 + /* kill all the processes of this user, hold a reference
51522 + to their creds struct, and prevent them from creating
51523 + another process until system reset
51524 + */
51525 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
51526 + /* we intentionally leak this ref */
51527 + user = get_uid(current->cred->user);
51528 + if (user) {
51529 + user->banned = 1;
51530 + user->ban_expires = ~0UL;
51531 + }
51532 +
51533 + read_lock(&tasklist_lock);
51534 + do_each_thread(tsk2, tsk) {
51535 + cred = __task_cred(tsk);
51536 + if (cred->uid == uid)
51537 + gr_fake_force_sig(SIGKILL, tsk);
51538 + } while_each_thread(tsk2, tsk);
51539 + read_unlock(&tasklist_lock);
51540 + }
51541 +#endif
51542 +}
51543 +
51544 +int __gr_process_user_ban(struct user_struct *user)
51545 +{
51546 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51547 + if (unlikely(user->banned)) {
51548 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
51549 + user->banned = 0;
51550 + user->ban_expires = 0;
51551 + free_uid(user);
51552 + } else
51553 + return -EPERM;
51554 + }
51555 +#endif
51556 + return 0;
51557 +}
51558 +
51559 +int gr_process_user_ban(void)
51560 +{
51561 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
51562 + return __gr_process_user_ban(current->cred->user);
51563 +#endif
51564 + return 0;
51565 +}
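grsec_sig.c carries the brute-force and kernel-exploit responses: gr_handle_brute_attach() either marks the parent of a crashed re-exec'd child as "brute" (which makes later execs sleep 30 seconds in gr_handle_brute_check()) or, for a non-dumpable crash by a non-root user, bans that uid for GR_USER_BAN_TIME and SIGKILLs the user's tasks; gr_handle_kernel_exploit() bans a non-root offender permanently (ban_expires = ~0UL) and panics for root or interrupt context; __gr_process_user_ban() lifts a ban once it has expired. The ban bookkeeping as a runnable sketch (illustrative only; the real code also drops the leaked user_struct reference):

#include <stdio.h>

#define USER_BAN_TIME (15 * 60)   /* seconds, mirrors GR_USER_BAN_TIME */
#define BAN_FOREVER   (~0UL)

struct user { int banned; unsigned long ban_expires; };

/* Mirrors __gr_process_user_ban(): refuse (-1 stands in for -EPERM) while the
 * ban is active; a ban with ban_expires == ~0UL never expires. */
static int process_user_ban(struct user *u, unsigned long now)
{
        if (u->banned) {
                if (u->ban_expires != BAN_FOREVER && now >= u->ban_expires) {
                        u->banned = 0;
                        u->ban_expires = 0;
                } else
                        return -1;
        }
        return 0;
}

int main(void)
{
        struct user u = { .banned = 1, .ban_expires = 1000 + USER_BAN_TIME };
        printf("at t=1000: %d\n", process_user_ban(&u, 1000));
        printf("at t=%d: %d\n", 1000 + USER_BAN_TIME,
               process_user_ban(&u, 1000 + USER_BAN_TIME));
        return 0;
}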
51566 diff -urNp linux-2.6.32.42/grsecurity/grsec_sock.c linux-2.6.32.42/grsecurity/grsec_sock.c
51567 --- linux-2.6.32.42/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
51568 +++ linux-2.6.32.42/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
51569 @@ -0,0 +1,275 @@
51570 +#include <linux/kernel.h>
51571 +#include <linux/module.h>
51572 +#include <linux/sched.h>
51573 +#include <linux/file.h>
51574 +#include <linux/net.h>
51575 +#include <linux/in.h>
51576 +#include <linux/ip.h>
51577 +#include <net/sock.h>
51578 +#include <net/inet_sock.h>
51579 +#include <linux/grsecurity.h>
51580 +#include <linux/grinternal.h>
51581 +#include <linux/gracl.h>
51582 +
51583 +kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
51584 +EXPORT_SYMBOL(gr_cap_rtnetlink);
51585 +
51586 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
51587 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
51588 +
51589 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
51590 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
51591 +
51592 +#ifdef CONFIG_UNIX_MODULE
51593 +EXPORT_SYMBOL(gr_acl_handle_unix);
51594 +EXPORT_SYMBOL(gr_acl_handle_mknod);
51595 +EXPORT_SYMBOL(gr_handle_chroot_unix);
51596 +EXPORT_SYMBOL(gr_handle_create);
51597 +#endif
51598 +
51599 +#ifdef CONFIG_GRKERNSEC
51600 +#define gr_conn_table_size 32749
51601 +struct conn_table_entry {
51602 + struct conn_table_entry *next;
51603 + struct signal_struct *sig;
51604 +};
51605 +
51606 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
51607 +DEFINE_SPINLOCK(gr_conn_table_lock);
51608 +
51609 +extern const char * gr_socktype_to_name(unsigned char type);
51610 +extern const char * gr_proto_to_name(unsigned char proto);
51611 +extern const char * gr_sockfamily_to_name(unsigned char family);
51612 +
51613 +static __inline__ int
51614 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
51615 +{
51616 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
51617 +}
51618 +
51619 +static __inline__ int
51620 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
51621 + __u16 sport, __u16 dport)
51622 +{
51623 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
51624 + sig->gr_sport == sport && sig->gr_dport == dport))
51625 + return 1;
51626 + else
51627 + return 0;
51628 +}
51629 +
51630 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
51631 +{
51632 + struct conn_table_entry **match;
51633 + unsigned int index;
51634 +
51635 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51636 + sig->gr_sport, sig->gr_dport,
51637 + gr_conn_table_size);
51638 +
51639 + newent->sig = sig;
51640 +
51641 + match = &gr_conn_table[index];
51642 + newent->next = *match;
51643 + *match = newent;
51644 +
51645 + return;
51646 +}
51647 +
51648 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
51649 +{
51650 + struct conn_table_entry *match, *last = NULL;
51651 + unsigned int index;
51652 +
51653 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
51654 + sig->gr_sport, sig->gr_dport,
51655 + gr_conn_table_size);
51656 +
51657 + match = gr_conn_table[index];
51658 + while (match && !conn_match(match->sig,
51659 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
51660 + sig->gr_dport)) {
51661 + last = match;
51662 + match = match->next;
51663 + }
51664 +
51665 + if (match) {
51666 + if (last)
51667 + last->next = match->next;
51668 + else
51669 + gr_conn_table[index] = NULL;
51670 + kfree(match);
51671 + }
51672 +
51673 + return;
51674 +}
51675 +
51676 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
51677 + __u16 sport, __u16 dport)
51678 +{
51679 + struct conn_table_entry *match;
51680 + unsigned int index;
51681 +
51682 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
51683 +
51684 + match = gr_conn_table[index];
51685 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
51686 + match = match->next;
51687 +
51688 + if (match)
51689 + return match->sig;
51690 + else
51691 + return NULL;
51692 +}
51693 +
51694 +#endif
51695 +
51696 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
51697 +{
51698 +#ifdef CONFIG_GRKERNSEC
51699 + struct signal_struct *sig = task->signal;
51700 + struct conn_table_entry *newent;
51701 +
51702 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
51703 + if (newent == NULL)
51704 + return;
51705 + /* no bh lock needed since we are called with bh disabled */
51706 + spin_lock(&gr_conn_table_lock);
51707 + gr_del_task_from_ip_table_nolock(sig);
51708 + sig->gr_saddr = inet->rcv_saddr;
51709 + sig->gr_daddr = inet->daddr;
51710 + sig->gr_sport = inet->sport;
51711 + sig->gr_dport = inet->dport;
51712 + gr_add_to_task_ip_table_nolock(sig, newent);
51713 + spin_unlock(&gr_conn_table_lock);
51714 +#endif
51715 + return;
51716 +}
51717 +
51718 +void gr_del_task_from_ip_table(struct task_struct *task)
51719 +{
51720 +#ifdef CONFIG_GRKERNSEC
51721 + spin_lock_bh(&gr_conn_table_lock);
51722 + gr_del_task_from_ip_table_nolock(task->signal);
51723 + spin_unlock_bh(&gr_conn_table_lock);
51724 +#endif
51725 + return;
51726 +}
51727 +
51728 +void
51729 +gr_attach_curr_ip(const struct sock *sk)
51730 +{
51731 +#ifdef CONFIG_GRKERNSEC
51732 + struct signal_struct *p, *set;
51733 + const struct inet_sock *inet = inet_sk(sk);
51734 +
51735 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
51736 + return;
51737 +
51738 + set = current->signal;
51739 +
51740 + spin_lock_bh(&gr_conn_table_lock);
51741 + p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
51742 + inet->dport, inet->sport);
51743 + if (unlikely(p != NULL)) {
51744 + set->curr_ip = p->curr_ip;
51745 + set->used_accept = 1;
51746 + gr_del_task_from_ip_table_nolock(p);
51747 + spin_unlock_bh(&gr_conn_table_lock);
51748 + return;
51749 + }
51750 + spin_unlock_bh(&gr_conn_table_lock);
51751 +
51752 + set->curr_ip = inet->daddr;
51753 + set->used_accept = 1;
51754 +#endif
51755 + return;
51756 +}
51757 +
51758 +int
51759 +gr_handle_sock_all(const int family, const int type, const int protocol)
51760 +{
51761 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51762 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
51763 + (family != AF_UNIX)) {
51764 + if (family == AF_INET)
51765 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
51766 + else
51767 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
51768 + return -EACCES;
51769 + }
51770 +#endif
51771 + return 0;
51772 +}
51773 +
51774 +int
51775 +gr_handle_sock_server(const struct sockaddr *sck)
51776 +{
51777 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51778 + if (grsec_enable_socket_server &&
51779 + in_group_p(grsec_socket_server_gid) &&
51780 + sck && (sck->sa_family != AF_UNIX) &&
51781 + (sck->sa_family != AF_LOCAL)) {
51782 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51783 + return -EACCES;
51784 + }
51785 +#endif
51786 + return 0;
51787 +}
51788 +
51789 +int
51790 +gr_handle_sock_server_other(const struct sock *sck)
51791 +{
51792 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51793 + if (grsec_enable_socket_server &&
51794 + in_group_p(grsec_socket_server_gid) &&
51795 + sck && (sck->sk_family != AF_UNIX) &&
51796 + (sck->sk_family != AF_LOCAL)) {
51797 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
51798 + return -EACCES;
51799 + }
51800 +#endif
51801 + return 0;
51802 +}
51803 +
51804 +int
51805 +gr_handle_sock_client(const struct sockaddr *sck)
51806 +{
51807 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51808 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
51809 + sck && (sck->sa_family != AF_UNIX) &&
51810 + (sck->sa_family != AF_LOCAL)) {
51811 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
51812 + return -EACCES;
51813 + }
51814 +#endif
51815 + return 0;
51816 +}
51817 +
51818 +kernel_cap_t
51819 +gr_cap_rtnetlink(struct sock *sock)
51820 +{
51821 +#ifdef CONFIG_GRKERNSEC
51822 + if (!gr_acl_is_enabled())
51823 + return current_cap();
51824 + else if (sock->sk_protocol == NETLINK_ISCSI &&
51825 + cap_raised(current_cap(), CAP_SYS_ADMIN) &&
51826 + gr_is_capable(CAP_SYS_ADMIN))
51827 + return current_cap();
51828 + else if (sock->sk_protocol == NETLINK_AUDIT &&
51829 + cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
51830 + gr_is_capable(CAP_AUDIT_WRITE) &&
51831 + cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
51832 + gr_is_capable(CAP_AUDIT_CONTROL))
51833 + return current_cap();
51834 + else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
51835 + ((sock->sk_protocol == NETLINK_ROUTE) ?
51836 + gr_is_capable_nolog(CAP_NET_ADMIN) :
51837 + gr_is_capable(CAP_NET_ADMIN)))
51838 + return current_cap();
51839 + else
51840 + return __cap_empty_set;
51841 +#else
51842 + return current_cap();
51843 +#endif
51844 +}
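
The connection-tracking code above keys tasks by their TCP 4-tuple: conn_hash() folds saddr, daddr, sport, and dport into one of 32749 buckets, gr_update_task_in_ip_table() records the connecting task under that key, and gr_attach_curr_ip() looks the tuple up with source and destination swapped so the accepting task inherits the connector's recorded curr_ip. The following is a minimal userspace sketch of that same hash, with made-up addresses and ports, only to illustrate how a tuple maps to a bucket; it is not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* Same arithmetic as conn_hash() in the hunk above: an additive mix of the
 * 4-tuple reduced modulo the (prime) table size of 32749 buckets. */
#define GR_CONN_TABLE_SIZE 32749u

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport,
			      unsigned int size)
{
	return (daddr + saddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % size;
}

int main(void)
{
	/* Made-up connection purely for illustration:
	 * 192.0.2.1:40000 -> 198.51.100.7:80 (values in host byte order). */
	uint32_t saddr = 0xC0000201u, daddr = 0xC6336407u;
	uint16_t sport = 40000, dport = 80;

	printf("bucket = %u of %u\n",
	       conn_hash(saddr, daddr, sport, dport, GR_CONN_TABLE_SIZE),
	       GR_CONN_TABLE_SIZE);
	return 0;
}

The prime table size helps the simple additive mix avoid obvious aliasing from regular port and address patterns.
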
51845 diff -urNp linux-2.6.32.42/grsecurity/grsec_sysctl.c linux-2.6.32.42/grsecurity/grsec_sysctl.c
51846 --- linux-2.6.32.42/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
51847 +++ linux-2.6.32.42/grsecurity/grsec_sysctl.c 2011-04-17 15:56:46.000000000 -0400
51848 @@ -0,0 +1,479 @@
51849 +#include <linux/kernel.h>
51850 +#include <linux/sched.h>
51851 +#include <linux/sysctl.h>
51852 +#include <linux/grsecurity.h>
51853 +#include <linux/grinternal.h>
51854 +
51855 +int
51856 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
51857 +{
51858 +#ifdef CONFIG_GRKERNSEC_SYSCTL
51859 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
51860 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
51861 + return -EACCES;
51862 + }
51863 +#endif
51864 + return 0;
51865 +}
51866 +
51867 +#ifdef CONFIG_GRKERNSEC_ROFS
51868 +static int __maybe_unused one = 1;
51869 +#endif
51870 +
51871 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
51872 +ctl_table grsecurity_table[] = {
51873 +#ifdef CONFIG_GRKERNSEC_SYSCTL
51874 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
51875 +#ifdef CONFIG_GRKERNSEC_IO
51876 + {
51877 + .ctl_name = CTL_UNNUMBERED,
51878 + .procname = "disable_priv_io",
51879 + .data = &grsec_disable_privio,
51880 + .maxlen = sizeof(int),
51881 + .mode = 0600,
51882 + .proc_handler = &proc_dointvec,
51883 + },
51884 +#endif
51885 +#endif
51886 +#ifdef CONFIG_GRKERNSEC_LINK
51887 + {
51888 + .ctl_name = CTL_UNNUMBERED,
51889 + .procname = "linking_restrictions",
51890 + .data = &grsec_enable_link,
51891 + .maxlen = sizeof(int),
51892 + .mode = 0600,
51893 + .proc_handler = &proc_dointvec,
51894 + },
51895 +#endif
51896 +#ifdef CONFIG_GRKERNSEC_FIFO
51897 + {
51898 + .ctl_name = CTL_UNNUMBERED,
51899 + .procname = "fifo_restrictions",
51900 + .data = &grsec_enable_fifo,
51901 + .maxlen = sizeof(int),
51902 + .mode = 0600,
51903 + .proc_handler = &proc_dointvec,
51904 + },
51905 +#endif
51906 +#ifdef CONFIG_GRKERNSEC_EXECVE
51907 + {
51908 + .ctl_name = CTL_UNNUMBERED,
51909 + .procname = "execve_limiting",
51910 + .data = &grsec_enable_execve,
51911 + .maxlen = sizeof(int),
51912 + .mode = 0600,
51913 + .proc_handler = &proc_dointvec,
51914 + },
51915 +#endif
51916 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
51917 + {
51918 + .ctl_name = CTL_UNNUMBERED,
51919 + .procname = "ip_blackhole",
51920 + .data = &grsec_enable_blackhole,
51921 + .maxlen = sizeof(int),
51922 + .mode = 0600,
51923 + .proc_handler = &proc_dointvec,
51924 + },
51925 + {
51926 + .ctl_name = CTL_UNNUMBERED,
51927 + .procname = "lastack_retries",
51928 + .data = &grsec_lastack_retries,
51929 + .maxlen = sizeof(int),
51930 + .mode = 0600,
51931 + .proc_handler = &proc_dointvec,
51932 + },
51933 +#endif
51934 +#ifdef CONFIG_GRKERNSEC_EXECLOG
51935 + {
51936 + .ctl_name = CTL_UNNUMBERED,
51937 + .procname = "exec_logging",
51938 + .data = &grsec_enable_execlog,
51939 + .maxlen = sizeof(int),
51940 + .mode = 0600,
51941 + .proc_handler = &proc_dointvec,
51942 + },
51943 +#endif
51944 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51945 + {
51946 + .ctl_name = CTL_UNNUMBERED,
51947 + .procname = "rwxmap_logging",
51948 + .data = &grsec_enable_log_rwxmaps,
51949 + .maxlen = sizeof(int),
51950 + .mode = 0600,
51951 + .proc_handler = &proc_dointvec,
51952 + },
51953 +#endif
51954 +#ifdef CONFIG_GRKERNSEC_SIGNAL
51955 + {
51956 + .ctl_name = CTL_UNNUMBERED,
51957 + .procname = "signal_logging",
51958 + .data = &grsec_enable_signal,
51959 + .maxlen = sizeof(int),
51960 + .mode = 0600,
51961 + .proc_handler = &proc_dointvec,
51962 + },
51963 +#endif
51964 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
51965 + {
51966 + .ctl_name = CTL_UNNUMBERED,
51967 + .procname = "forkfail_logging",
51968 + .data = &grsec_enable_forkfail,
51969 + .maxlen = sizeof(int),
51970 + .mode = 0600,
51971 + .proc_handler = &proc_dointvec,
51972 + },
51973 +#endif
51974 +#ifdef CONFIG_GRKERNSEC_TIME
51975 + {
51976 + .ctl_name = CTL_UNNUMBERED,
51977 + .procname = "timechange_logging",
51978 + .data = &grsec_enable_time,
51979 + .maxlen = sizeof(int),
51980 + .mode = 0600,
51981 + .proc_handler = &proc_dointvec,
51982 + },
51983 +#endif
51984 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
51985 + {
51986 + .ctl_name = CTL_UNNUMBERED,
51987 + .procname = "chroot_deny_shmat",
51988 + .data = &grsec_enable_chroot_shmat,
51989 + .maxlen = sizeof(int),
51990 + .mode = 0600,
51991 + .proc_handler = &proc_dointvec,
51992 + },
51993 +#endif
51994 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
51995 + {
51996 + .ctl_name = CTL_UNNUMBERED,
51997 + .procname = "chroot_deny_unix",
51998 + .data = &grsec_enable_chroot_unix,
51999 + .maxlen = sizeof(int),
52000 + .mode = 0600,
52001 + .proc_handler = &proc_dointvec,
52002 + },
52003 +#endif
52004 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52005 + {
52006 + .ctl_name = CTL_UNNUMBERED,
52007 + .procname = "chroot_deny_mount",
52008 + .data = &grsec_enable_chroot_mount,
52009 + .maxlen = sizeof(int),
52010 + .mode = 0600,
52011 + .proc_handler = &proc_dointvec,
52012 + },
52013 +#endif
52014 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52015 + {
52016 + .ctl_name = CTL_UNNUMBERED,
52017 + .procname = "chroot_deny_fchdir",
52018 + .data = &grsec_enable_chroot_fchdir,
52019 + .maxlen = sizeof(int),
52020 + .mode = 0600,
52021 + .proc_handler = &proc_dointvec,
52022 + },
52023 +#endif
52024 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52025 + {
52026 + .ctl_name = CTL_UNNUMBERED,
52027 + .procname = "chroot_deny_chroot",
52028 + .data = &grsec_enable_chroot_double,
52029 + .maxlen = sizeof(int),
52030 + .mode = 0600,
52031 + .proc_handler = &proc_dointvec,
52032 + },
52033 +#endif
52034 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52035 + {
52036 + .ctl_name = CTL_UNNUMBERED,
52037 + .procname = "chroot_deny_pivot",
52038 + .data = &grsec_enable_chroot_pivot,
52039 + .maxlen = sizeof(int),
52040 + .mode = 0600,
52041 + .proc_handler = &proc_dointvec,
52042 + },
52043 +#endif
52044 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52045 + {
52046 + .ctl_name = CTL_UNNUMBERED,
52047 + .procname = "chroot_enforce_chdir",
52048 + .data = &grsec_enable_chroot_chdir,
52049 + .maxlen = sizeof(int),
52050 + .mode = 0600,
52051 + .proc_handler = &proc_dointvec,
52052 + },
52053 +#endif
52054 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52055 + {
52056 + .ctl_name = CTL_UNNUMBERED,
52057 + .procname = "chroot_deny_chmod",
52058 + .data = &grsec_enable_chroot_chmod,
52059 + .maxlen = sizeof(int),
52060 + .mode = 0600,
52061 + .proc_handler = &proc_dointvec,
52062 + },
52063 +#endif
52064 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52065 + {
52066 + .ctl_name = CTL_UNNUMBERED,
52067 + .procname = "chroot_deny_mknod",
52068 + .data = &grsec_enable_chroot_mknod,
52069 + .maxlen = sizeof(int),
52070 + .mode = 0600,
52071 + .proc_handler = &proc_dointvec,
52072 + },
52073 +#endif
52074 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52075 + {
52076 + .ctl_name = CTL_UNNUMBERED,
52077 + .procname = "chroot_restrict_nice",
52078 + .data = &grsec_enable_chroot_nice,
52079 + .maxlen = sizeof(int),
52080 + .mode = 0600,
52081 + .proc_handler = &proc_dointvec,
52082 + },
52083 +#endif
52084 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52085 + {
52086 + .ctl_name = CTL_UNNUMBERED,
52087 + .procname = "chroot_execlog",
52088 + .data = &grsec_enable_chroot_execlog,
52089 + .maxlen = sizeof(int),
52090 + .mode = 0600,
52091 + .proc_handler = &proc_dointvec,
52092 + },
52093 +#endif
52094 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52095 + {
52096 + .ctl_name = CTL_UNNUMBERED,
52097 + .procname = "chroot_caps",
52098 + .data = &grsec_enable_chroot_caps,
52099 + .maxlen = sizeof(int),
52100 + .mode = 0600,
52101 + .proc_handler = &proc_dointvec,
52102 + },
52103 +#endif
52104 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52105 + {
52106 + .ctl_name = CTL_UNNUMBERED,
52107 + .procname = "chroot_deny_sysctl",
52108 + .data = &grsec_enable_chroot_sysctl,
52109 + .maxlen = sizeof(int),
52110 + .mode = 0600,
52111 + .proc_handler = &proc_dointvec,
52112 + },
52113 +#endif
52114 +#ifdef CONFIG_GRKERNSEC_TPE
52115 + {
52116 + .ctl_name = CTL_UNNUMBERED,
52117 + .procname = "tpe",
52118 + .data = &grsec_enable_tpe,
52119 + .maxlen = sizeof(int),
52120 + .mode = 0600,
52121 + .proc_handler = &proc_dointvec,
52122 + },
52123 + {
52124 + .ctl_name = CTL_UNNUMBERED,
52125 + .procname = "tpe_gid",
52126 + .data = &grsec_tpe_gid,
52127 + .maxlen = sizeof(int),
52128 + .mode = 0600,
52129 + .proc_handler = &proc_dointvec,
52130 + },
52131 +#endif
52132 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52133 + {
52134 + .ctl_name = CTL_UNNUMBERED,
52135 + .procname = "tpe_invert",
52136 + .data = &grsec_enable_tpe_invert,
52137 + .maxlen = sizeof(int),
52138 + .mode = 0600,
52139 + .proc_handler = &proc_dointvec,
52140 + },
52141 +#endif
52142 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52143 + {
52144 + .ctl_name = CTL_UNNUMBERED,
52145 + .procname = "tpe_restrict_all",
52146 + .data = &grsec_enable_tpe_all,
52147 + .maxlen = sizeof(int),
52148 + .mode = 0600,
52149 + .proc_handler = &proc_dointvec,
52150 + },
52151 +#endif
52152 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52153 + {
52154 + .ctl_name = CTL_UNNUMBERED,
52155 + .procname = "socket_all",
52156 + .data = &grsec_enable_socket_all,
52157 + .maxlen = sizeof(int),
52158 + .mode = 0600,
52159 + .proc_handler = &proc_dointvec,
52160 + },
52161 + {
52162 + .ctl_name = CTL_UNNUMBERED,
52163 + .procname = "socket_all_gid",
52164 + .data = &grsec_socket_all_gid,
52165 + .maxlen = sizeof(int),
52166 + .mode = 0600,
52167 + .proc_handler = &proc_dointvec,
52168 + },
52169 +#endif
52170 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52171 + {
52172 + .ctl_name = CTL_UNNUMBERED,
52173 + .procname = "socket_client",
52174 + .data = &grsec_enable_socket_client,
52175 + .maxlen = sizeof(int),
52176 + .mode = 0600,
52177 + .proc_handler = &proc_dointvec,
52178 + },
52179 + {
52180 + .ctl_name = CTL_UNNUMBERED,
52181 + .procname = "socket_client_gid",
52182 + .data = &grsec_socket_client_gid,
52183 + .maxlen = sizeof(int),
52184 + .mode = 0600,
52185 + .proc_handler = &proc_dointvec,
52186 + },
52187 +#endif
52188 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52189 + {
52190 + .ctl_name = CTL_UNNUMBERED,
52191 + .procname = "socket_server",
52192 + .data = &grsec_enable_socket_server,
52193 + .maxlen = sizeof(int),
52194 + .mode = 0600,
52195 + .proc_handler = &proc_dointvec,
52196 + },
52197 + {
52198 + .ctl_name = CTL_UNNUMBERED,
52199 + .procname = "socket_server_gid",
52200 + .data = &grsec_socket_server_gid,
52201 + .maxlen = sizeof(int),
52202 + .mode = 0600,
52203 + .proc_handler = &proc_dointvec,
52204 + },
52205 +#endif
52206 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
52207 + {
52208 + .ctl_name = CTL_UNNUMBERED,
52209 + .procname = "audit_group",
52210 + .data = &grsec_enable_group,
52211 + .maxlen = sizeof(int),
52212 + .mode = 0600,
52213 + .proc_handler = &proc_dointvec,
52214 + },
52215 + {
52216 + .ctl_name = CTL_UNNUMBERED,
52217 + .procname = "audit_gid",
52218 + .data = &grsec_audit_gid,
52219 + .maxlen = sizeof(int),
52220 + .mode = 0600,
52221 + .proc_handler = &proc_dointvec,
52222 + },
52223 +#endif
52224 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52225 + {
52226 + .ctl_name = CTL_UNNUMBERED,
52227 + .procname = "audit_chdir",
52228 + .data = &grsec_enable_chdir,
52229 + .maxlen = sizeof(int),
52230 + .mode = 0600,
52231 + .proc_handler = &proc_dointvec,
52232 + },
52233 +#endif
52234 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52235 + {
52236 + .ctl_name = CTL_UNNUMBERED,
52237 + .procname = "audit_mount",
52238 + .data = &grsec_enable_mount,
52239 + .maxlen = sizeof(int),
52240 + .mode = 0600,
52241 + .proc_handler = &proc_dointvec,
52242 + },
52243 +#endif
52244 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52245 + {
52246 + .ctl_name = CTL_UNNUMBERED,
52247 + .procname = "audit_textrel",
52248 + .data = &grsec_enable_audit_textrel,
52249 + .maxlen = sizeof(int),
52250 + .mode = 0600,
52251 + .proc_handler = &proc_dointvec,
52252 + },
52253 +#endif
52254 +#ifdef CONFIG_GRKERNSEC_DMESG
52255 + {
52256 + .ctl_name = CTL_UNNUMBERED,
52257 + .procname = "dmesg",
52258 + .data = &grsec_enable_dmesg,
52259 + .maxlen = sizeof(int),
52260 + .mode = 0600,
52261 + .proc_handler = &proc_dointvec,
52262 + },
52263 +#endif
52264 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
52265 + {
52266 + .ctl_name = CTL_UNNUMBERED,
52267 + .procname = "chroot_findtask",
52268 + .data = &grsec_enable_chroot_findtask,
52269 + .maxlen = sizeof(int),
52270 + .mode = 0600,
52271 + .proc_handler = &proc_dointvec,
52272 + },
52273 +#endif
52274 +#ifdef CONFIG_GRKERNSEC_RESLOG
52275 + {
52276 + .ctl_name = CTL_UNNUMBERED,
52277 + .procname = "resource_logging",
52278 + .data = &grsec_resource_logging,
52279 + .maxlen = sizeof(int),
52280 + .mode = 0600,
52281 + .proc_handler = &proc_dointvec,
52282 + },
52283 +#endif
52284 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52285 + {
52286 + .ctl_name = CTL_UNNUMBERED,
52287 + .procname = "audit_ptrace",
52288 + .data = &grsec_enable_audit_ptrace,
52289 + .maxlen = sizeof(int),
52290 + .mode = 0600,
52291 + .proc_handler = &proc_dointvec,
52292 + },
52293 +#endif
52294 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
52295 + {
52296 + .ctl_name = CTL_UNNUMBERED,
52297 + .procname = "harden_ptrace",
52298 + .data = &grsec_enable_harden_ptrace,
52299 + .maxlen = sizeof(int),
52300 + .mode = 0600,
52301 + .proc_handler = &proc_dointvec,
52302 + },
52303 +#endif
52304 + {
52305 + .ctl_name = CTL_UNNUMBERED,
52306 + .procname = "grsec_lock",
52307 + .data = &grsec_lock,
52308 + .maxlen = sizeof(int),
52309 + .mode = 0600,
52310 + .proc_handler = &proc_dointvec,
52311 + },
52312 +#endif
52313 +#ifdef CONFIG_GRKERNSEC_ROFS
52314 + {
52315 + .ctl_name = CTL_UNNUMBERED,
52316 + .procname = "romount_protect",
52317 + .data = &grsec_enable_rofs,
52318 + .maxlen = sizeof(int),
52319 + .mode = 0600,
52320 + .proc_handler = &proc_dointvec_minmax,
52321 + .extra1 = &one,
52322 + .extra2 = &one,
52323 + },
52324 +#endif
52325 + { .ctl_name = 0 }
52326 +};
52327 +#endif
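
Every entry in grsecurity_table above follows the same shape: an unnumbered ctl_name, a procname, a pointer to an int toggle, mode 0600, and proc_dointvec as the handler (the one-way romount_protect entry uses proc_dointvec_minmax with both bounds pinned to 1), while gr_handle_sysctl_mod() at the top of the file refuses further writes under the "grsecurity" directory once grsec_lock is set. Assuming the table ends up under /proc/sys/kernel/grsecurity (the mount point is not shown in this hunk, so that prefix is an assumption), a root process could flip a toggle with an ordinary write, as in this sketch:

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* The "grsecurity" directory name matches the strcmp() in
 * gr_handle_sysctl_mod() above; the /proc/sys/kernel prefix is an
 * assumption, since this hunk does not show where the table is attached. */
#define GRSEC_SYSCTL_DIR "/proc/sys/kernel/grsecurity"

static int grsec_sysctl_set(const char *name, int value)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", GRSEC_SYSCTL_DIR, name);
	f = fopen(path, "w");
	if (!f)
		return -errno;
	fprintf(f, "%d\n", value);
	return fclose(f) ? -errno : 0;
}

int main(void)
{
	/* Example: lock the grsecurity sysctl tree (requires root). */
	int ret = grsec_sysctl_set("grsec_lock", 1);

	if (ret)
		fprintf(stderr, "write failed: %s\n", strerror(-ret));
	return ret ? 1 : 0;
}

Once grsec_lock has been written as 1, later writes like the one above should fail with EACCES via gr_handle_sysctl_mod().
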
52328 diff -urNp linux-2.6.32.42/grsecurity/grsec_time.c linux-2.6.32.42/grsecurity/grsec_time.c
52329 --- linux-2.6.32.42/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
52330 +++ linux-2.6.32.42/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
52331 @@ -0,0 +1,16 @@
52332 +#include <linux/kernel.h>
52333 +#include <linux/sched.h>
52334 +#include <linux/grinternal.h>
52335 +#include <linux/module.h>
52336 +
52337 +void
52338 +gr_log_timechange(void)
52339 +{
52340 +#ifdef CONFIG_GRKERNSEC_TIME
52341 + if (grsec_enable_time)
52342 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
52343 +#endif
52344 + return;
52345 +}
52346 +
52347 +EXPORT_SYMBOL(gr_log_timechange);
52348 diff -urNp linux-2.6.32.42/grsecurity/grsec_tpe.c linux-2.6.32.42/grsecurity/grsec_tpe.c
52349 --- linux-2.6.32.42/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
52350 +++ linux-2.6.32.42/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
52351 @@ -0,0 +1,39 @@
52352 +#include <linux/kernel.h>
52353 +#include <linux/sched.h>
52354 +#include <linux/file.h>
52355 +#include <linux/fs.h>
52356 +#include <linux/grinternal.h>
52357 +
52358 +extern int gr_acl_tpe_check(void);
52359 +
52360 +int
52361 +gr_tpe_allow(const struct file *file)
52362 +{
52363 +#ifdef CONFIG_GRKERNSEC
52364 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
52365 + const struct cred *cred = current_cred();
52366 +
52367 + if (cred->uid && ((grsec_enable_tpe &&
52368 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52369 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
52370 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
52371 +#else
52372 + in_group_p(grsec_tpe_gid)
52373 +#endif
52374 + ) || gr_acl_tpe_check()) &&
52375 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
52376 + (inode->i_mode & S_IWOTH))))) {
52377 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52378 + return 0;
52379 + }
52380 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
52381 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
52382 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
52383 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
52384 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
52385 + return 0;
52386 + }
52387 +#endif
52388 +#endif
52389 + return 1;
52390 +}
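
gr_tpe_allow() above implements the trusted-path-execution test: for a non-root user who is subject to TPE (in the TPE group, or outside it with TPE_INVERT, or flagged by gr_acl_tpe_check()), execution is refused when the directory containing the binary is not owned by root or is group- or world-writable; GRKERNSEC_TPE_ALL additionally refuses directories owned by neither root nor the user, or writable by group/others, regardless of group membership. The sketch below restates only the base directory test as a standalone userspace program; it is a simplified illustration using a hypothetical example path, not the kernel code path.

#include <stdio.h>
#include <sys/stat.h>
#include <libgen.h>

/* Userspace restatement of the base directory test in gr_tpe_allow():
 * a directory is treated as untrusted when it is not owned by root or
 * is writable by group or others. */
static int dir_untrusted(const char *dir)
{
	struct stat st;

	if (stat(dir, &st) != 0)
		return 1;	/* treat unreadable paths as untrusted */
	return st.st_uid != 0 || (st.st_mode & (S_IWGRP | S_IWOTH));
}

int main(int argc, char **argv)
{
	/* The default path is only a hypothetical example target. */
	const char *target = argc > 1 ? argv[1] : "/tmp/foo";
	char buf[4096];
	char *dir;

	snprintf(buf, sizeof(buf), "%s", target);
	dir = dirname(buf);	/* dirname() may modify buf */
	printf("%s: parent %s looks %s\n", target, dir,
	       dir_untrusted(dir) ? "untrusted" : "trusted");
	return 0;
}
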
52391 diff -urNp linux-2.6.32.42/grsecurity/grsum.c linux-2.6.32.42/grsecurity/grsum.c
52392 --- linux-2.6.32.42/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
52393 +++ linux-2.6.32.42/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
52394 @@ -0,0 +1,61 @@
52395 +#include <linux/err.h>
52396 +#include <linux/kernel.h>
52397 +#include <linux/sched.h>
52398 +#include <linux/mm.h>
52399 +#include <linux/scatterlist.h>
52400 +#include <linux/crypto.h>
52401 +#include <linux/gracl.h>
52402 +
52403 +
52404 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
52405 +#error "crypto and sha256 must be built into the kernel"
52406 +#endif
52407 +
52408 +int
52409 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
52410 +{
52411 + char *p;
52412 + struct crypto_hash *tfm;
52413 + struct hash_desc desc;
52414 + struct scatterlist sg;
52415 + unsigned char temp_sum[GR_SHA_LEN];
52416 + volatile int retval = 0;
52417 + volatile int dummy = 0;
52418 + unsigned int i;
52419 +
52420 + sg_init_table(&sg, 1);
52421 +
52422 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
52423 + if (IS_ERR(tfm)) {
52424 + /* should never happen, since sha256 should be built in */
52425 + return 1;
52426 + }
52427 +
52428 + desc.tfm = tfm;
52429 + desc.flags = 0;
52430 +
52431 + crypto_hash_init(&desc);
52432 +
52433 + p = salt;
52434 + sg_set_buf(&sg, p, GR_SALT_LEN);
52435 + crypto_hash_update(&desc, &sg, sg.length);
52436 +
52437 + p = entry->pw;
52438 + sg_set_buf(&sg, p, strlen(p));
52439 +
52440 + crypto_hash_update(&desc, &sg, sg.length);
52441 +
52442 + crypto_hash_final(&desc, temp_sum);
52443 +
52444 + memset(entry->pw, 0, GR_PW_LEN);
52445 +
52446 + for (i = 0; i < GR_SHA_LEN; i++)
52447 + if (sum[i] != temp_sum[i])
52448 + retval = 1;
52449 + else
52450 + dummy = 1; // waste a cycle
52451 +
52452 + crypto_free_hash(tfm);
52453 +
52454 + return retval;
52455 +}
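
The comparison loop at the end of chkpw() walks all GR_SHA_LEN bytes and uses volatile variables plus the "waste a cycle" branch so that a mismatch near the start takes about as long as one near the end, keeping the password check's timing from revealing how much of the stored hash matched. Here is a small standalone sketch of the same constant-time-comparison idea; the 32-byte length merely stands in for a SHA-256 digest.

#include <stdio.h>
#include <stddef.h>

/* Constant-time comparison: accumulate differences instead of returning
 * at the first mismatch, the same idea as the loop in chkpw() above. */
static int ct_memcmp(const unsigned char *a, const unsigned char *b, size_t n)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < n; i++)
		diff |= a[i] ^ b[i];
	return diff != 0;
}

int main(void)
{
	/* 32 bytes stands in for GR_SHA_LEN (the size of a SHA-256 digest). */
	unsigned char x[32] = { 1, 2, 3 }, y[32] = { 1, 2, 4 };

	printf("mismatch: %d\n", ct_memcmp(x, y, sizeof(x)));
	return 0;
}

An ordinary memcmp() would return at the first differing byte, which is exactly the early exit the kernel code avoids.
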
52456 diff -urNp linux-2.6.32.42/grsecurity/Kconfig linux-2.6.32.42/grsecurity/Kconfig
52457 --- linux-2.6.32.42/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
52458 +++ linux-2.6.32.42/grsecurity/Kconfig 2011-06-13 21:34:09.000000000 -0400
52459 @@ -0,0 +1,1045 @@
52460 +#
52461 +# grsecurity configuration
52462 +#
52463 +
52464 +menu "Grsecurity"
52465 +
52466 +config GRKERNSEC
52467 + bool "Grsecurity"
52468 + select CRYPTO
52469 + select CRYPTO_SHA256
52470 + help
52471 + If you say Y here, you will be able to configure many features
52472 + that will enhance the security of your system. It is highly
52473 + recommended that you say Y here and read through the help
52474 + for each option so that you fully understand the features and
52475 + can evaluate their usefulness for your machine.
52476 +
52477 +choice
52478 + prompt "Security Level"
52479 + depends on GRKERNSEC
52480 + default GRKERNSEC_CUSTOM
52481 +
52482 +config GRKERNSEC_LOW
52483 + bool "Low"
52484 + select GRKERNSEC_LINK
52485 + select GRKERNSEC_FIFO
52486 + select GRKERNSEC_EXECVE
52487 + select GRKERNSEC_RANDNET
52488 + select GRKERNSEC_DMESG
52489 + select GRKERNSEC_CHROOT
52490 + select GRKERNSEC_CHROOT_CHDIR
52491 +
52492 + help
52493 + If you choose this option, several of the grsecurity options will
52494 + be enabled that will give you greater protection against a number
52495 + of attacks, while assuring that none of your software will have any
52496 + conflicts with the additional security measures. If you run a lot
52497 + of unusual software, or you are having problems with the higher
52498 + security levels, you should say Y here. With this option, the
52499 + following features are enabled:
52500 +
52501 + - Linking restrictions
52502 + - FIFO restrictions
52503 + - Enforcing RLIMIT_NPROC on execve
52504 + - Restricted dmesg
52505 + - Enforced chdir("/") on chroot
52506 + - Runtime module disabling
52507 +
52508 +config GRKERNSEC_MEDIUM
52509 + bool "Medium"
52510 + select PAX
52511 + select PAX_EI_PAX
52512 + select PAX_PT_PAX_FLAGS
52513 + select PAX_HAVE_ACL_FLAGS
52514 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52515 + select GRKERNSEC_CHROOT
52516 + select GRKERNSEC_CHROOT_SYSCTL
52517 + select GRKERNSEC_LINK
52518 + select GRKERNSEC_FIFO
52519 + select GRKERNSEC_EXECVE
52520 + select GRKERNSEC_DMESG
52521 + select GRKERNSEC_RANDNET
52522 + select GRKERNSEC_FORKFAIL
52523 + select GRKERNSEC_TIME
52524 + select GRKERNSEC_SIGNAL
52525 + select GRKERNSEC_CHROOT
52526 + select GRKERNSEC_CHROOT_UNIX
52527 + select GRKERNSEC_CHROOT_MOUNT
52528 + select GRKERNSEC_CHROOT_PIVOT
52529 + select GRKERNSEC_CHROOT_DOUBLE
52530 + select GRKERNSEC_CHROOT_CHDIR
52531 + select GRKERNSEC_CHROOT_MKNOD
52532 + select GRKERNSEC_PROC
52533 + select GRKERNSEC_PROC_USERGROUP
52534 + select PAX_RANDUSTACK
52535 + select PAX_ASLR
52536 + select PAX_RANDMMAP
52537 + select PAX_REFCOUNT if (X86 || SPARC64)
52538 + select PAX_USERCOPY if ((X86 || SPARC32 || SPARC64 || PPC) && (SLAB || SLUB || SLOB))
52539 +
52540 + help
52541 + If you say Y here, several features in addition to those included
52542 + in the low additional security level will be enabled. These
52543 + features provide even more security to your system, though in rare
52544 + cases they may be incompatible with very old or poorly written
52545 + software. If you enable this option, make sure that your auth
52546 + service (identd) is running as gid 1001. With this option,
52547 + the following features (in addition to those provided in the
52548 + low additional security level) will be enabled:
52549 +
52550 + - Failed fork logging
52551 + - Time change logging
52552 + - Signal logging
52553 + - Deny mounts in chroot
52554 + - Deny double chrooting
52555 + - Deny sysctl writes in chroot
52556 + - Deny mknod in chroot
52557 + - Deny access to abstract AF_UNIX sockets out of chroot
52558 + - Deny pivot_root in chroot
52559 +	  - Deny writes to /dev/kmem, /dev/mem, and /dev/port
52560 + - /proc restrictions with special GID set to 10 (usually wheel)
52561 + - Address Space Layout Randomization (ASLR)
52562 + - Prevent exploitation of most refcount overflows
52563 + - Bounds checking of copying between the kernel and userland
52564 +
52565 +config GRKERNSEC_HIGH
52566 + bool "High"
52567 + select GRKERNSEC_LINK
52568 + select GRKERNSEC_FIFO
52569 + select GRKERNSEC_EXECVE
52570 + select GRKERNSEC_DMESG
52571 + select GRKERNSEC_FORKFAIL
52572 + select GRKERNSEC_TIME
52573 + select GRKERNSEC_SIGNAL
52574 + select GRKERNSEC_CHROOT
52575 + select GRKERNSEC_CHROOT_SHMAT
52576 + select GRKERNSEC_CHROOT_UNIX
52577 + select GRKERNSEC_CHROOT_MOUNT
52578 + select GRKERNSEC_CHROOT_FCHDIR
52579 + select GRKERNSEC_CHROOT_PIVOT
52580 + select GRKERNSEC_CHROOT_DOUBLE
52581 + select GRKERNSEC_CHROOT_CHDIR
52582 + select GRKERNSEC_CHROOT_MKNOD
52583 + select GRKERNSEC_CHROOT_CAPS
52584 + select GRKERNSEC_CHROOT_SYSCTL
52585 + select GRKERNSEC_CHROOT_FINDTASK
52586 + select GRKERNSEC_SYSFS_RESTRICT
52587 + select GRKERNSEC_PROC
52588 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
52589 + select GRKERNSEC_HIDESYM
52590 + select GRKERNSEC_BRUTE
52591 + select GRKERNSEC_PROC_USERGROUP
52592 + select GRKERNSEC_KMEM
52593 + select GRKERNSEC_RESLOG
52594 + select GRKERNSEC_RANDNET
52595 + select GRKERNSEC_PROC_ADD
52596 + select GRKERNSEC_CHROOT_CHMOD
52597 + select GRKERNSEC_CHROOT_NICE
52598 + select GRKERNSEC_AUDIT_MOUNT
52599 + select GRKERNSEC_MODHARDEN if (MODULES)
52600 + select GRKERNSEC_HARDEN_PTRACE
52601 + select GRKERNSEC_VM86 if (X86_32)
52602 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC32 || SPARC64)
52603 + select PAX
52604 + select PAX_RANDUSTACK
52605 + select PAX_ASLR
52606 + select PAX_RANDMMAP
52607 + select PAX_NOEXEC
52608 + select PAX_MPROTECT
52609 + select PAX_EI_PAX
52610 + select PAX_PT_PAX_FLAGS
52611 + select PAX_HAVE_ACL_FLAGS
52612 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
52613 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
52614 + select PAX_RANDKSTACK if (X86_TSC && X86)
52615 + select PAX_SEGMEXEC if (X86_32)
52616 + select PAX_PAGEEXEC
52617 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC32 || SPARC64)
52618 + select PAX_EMUTRAMP if (PARISC)
52619 + select PAX_EMUSIGRT if (PARISC)
52620 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
52621 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
52622 + select PAX_REFCOUNT if (X86 || SPARC64)
52623 + select PAX_USERCOPY if ((X86 || PPC || SPARC32 || SPARC64) && (SLAB || SLUB || SLOB))
52624 + help
52625 + If you say Y here, many of the features of grsecurity will be
52626 + enabled, which will protect you against many kinds of attacks
52627 + against your system. The heightened security comes at a cost
52628 + of an increased chance of incompatibilities with rare software
52629 + on your machine. Since this security level enables PaX, you should
52630 + view <http://pax.grsecurity.net> and read about the PaX
52631 + project. While you are there, download chpax and run it on
52632 + binaries that cause problems with PaX. Also remember that
52633 + since the /proc restrictions are enabled, you must run your
52634 + identd as gid 1001. This security level enables the following
52635 + features in addition to those listed in the low and medium
52636 + security levels:
52637 +
52638 + - Additional /proc restrictions
52639 + - Chmod restrictions in chroot
52640 + - No signals, ptrace, or viewing of processes outside of chroot
52641 + - Capability restrictions in chroot
52642 + - Deny fchdir out of chroot
52643 + - Priority restrictions in chroot
52644 + - Segmentation-based implementation of PaX
52645 + - Mprotect restrictions
52646 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
52647 + - Kernel stack randomization
52648 + - Mount/unmount/remount logging
52649 + - Kernel symbol hiding
52650 + - Prevention of memory exhaustion-based exploits
52651 + - Hardening of module auto-loading
52652 + - Ptrace restrictions
52653 + - Restricted vm86 mode
52654 + - Restricted sysfs/debugfs
52655 + - Active kernel exploit response
52656 +
52657 +config GRKERNSEC_CUSTOM
52658 + bool "Custom"
52659 + help
52660 + If you say Y here, you will be able to configure every grsecurity
52661 + option, which allows you to enable many more features that aren't
52662 + covered in the basic security levels. These additional features
52663 + include TPE, socket restrictions, and the sysctl system for
52664 + grsecurity. It is advised that you read through the help for
52665 + each option to determine its usefulness in your situation.
52666 +
52667 +endchoice
52668 +
52669 +menu "Address Space Protection"
52670 +depends on GRKERNSEC
52671 +
52672 +config GRKERNSEC_KMEM
52673 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
52674 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
52675 + help
52676 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
52677 + be written to via mmap or otherwise to modify the running kernel.
52678 + /dev/port will also not be allowed to be opened. If you have module
52679 + support disabled, enabling this will close up four ways that are
52680 + currently used to insert malicious code into the running kernel.
52681 + Even with all these features enabled, we still highly recommend that
52682 + you use the RBAC system, as it is still possible for an attacker to
52683 + modify the running kernel through privileged I/O granted by ioperm/iopl.
52684 + If you are not using XFree86, you may be able to stop this additional
52685 + case by enabling the 'Disable privileged I/O' option. Though nothing
52686 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
52687 + but only to video memory, which is the only writing we allow in this
52688 +	  case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the
52689 +	  mapping will not be allowed to be mprotected with PROT_WRITE later.
52690 + It is highly recommended that you say Y here if you meet all the
52691 + conditions above.
52692 +
52693 +config GRKERNSEC_VM86
52694 + bool "Restrict VM86 mode"
52695 + depends on X86_32
52696 +
52697 + help
52698 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
52699 + make use of a special execution mode on 32bit x86 processors called
52700 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
52701 + video cards and will still work with this option enabled. The purpose
52702 + of the option is to prevent exploitation of emulation errors in
52703 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
52704 + Nearly all users should be able to enable this option.
52705 +
52706 +config GRKERNSEC_IO
52707 + bool "Disable privileged I/O"
52708 + depends on X86
52709 + select RTC_CLASS
52710 + select RTC_INTF_DEV
52711 + select RTC_DRV_CMOS
52712 +
52713 + help
52714 + If you say Y here, all ioperm and iopl calls will return an error.
52715 + Ioperm and iopl can be used to modify the running kernel.
52716 + Unfortunately, some programs need this access to operate properly,
52717 + the most notable of which are XFree86 and hwclock. hwclock can be
52718 + remedied by having RTC support in the kernel, so real-time
52719 + clock support is enabled if this option is enabled, to ensure
52720 + that hwclock operates correctly. XFree86 still will not
52721 + operate correctly with this option enabled, so DO NOT CHOOSE Y
52722 + IF YOU USE XFree86. If you use XFree86 and you still want to
52723 + protect your kernel against modification, use the RBAC system.
52724 +
52725 +config GRKERNSEC_PROC_MEMMAP
52726 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
52727 + default y if (PAX_NOEXEC || PAX_ASLR)
52728 + depends on PAX_NOEXEC || PAX_ASLR
52729 + help
52730 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
52731 +	  give no information about the addresses of a task's mappings if
52732 +	  PaX features that rely on random addresses are enabled for that task.
52733 + If you use PaX it is greatly recommended that you say Y here as it
52734 + closes up a hole that makes the full ASLR useless for suid
52735 + binaries.
52736 +
52737 +config GRKERNSEC_BRUTE
52738 + bool "Deter exploit bruteforcing"
52739 + help
52740 + If you say Y here, attempts to bruteforce exploits against forking
52741 + daemons such as apache or sshd, as well as against suid/sgid binaries
52742 + will be deterred. When a child of a forking daemon is killed by PaX
52743 + or crashes due to an illegal instruction or other suspicious signal,
52744 + the parent process will be delayed 30 seconds upon every subsequent
52745 + fork until the administrator is able to assess the situation and
52746 + restart the daemon.
52747 + In the suid/sgid case, the attempt is logged, the user has all their
52748 + processes terminated, and they are prevented from executing any further
52749 + processes for 15 minutes.
52750 + It is recommended that you also enable signal logging in the auditing
52751 + section so that logs are generated when a process triggers a suspicious
52752 + signal.
52753 +
52754 +config GRKERNSEC_MODHARDEN
52755 + bool "Harden module auto-loading"
52756 + depends on MODULES
52757 + help
52758 + If you say Y here, module auto-loading in response to use of some
52759 + feature implemented by an unloaded module will be restricted to
52760 + root users. Enabling this option helps defend against attacks
52761 + by unprivileged users who abuse the auto-loading behavior to
52762 + cause a vulnerable module to load that is then exploited.
52763 +
52764 + If this option prevents a legitimate use of auto-loading for a
52765 + non-root user, the administrator can execute modprobe manually
52766 + with the exact name of the module mentioned in the alert log.
52767 + Alternatively, the administrator can add the module to the list
52768 + of modules loaded at boot by modifying init scripts.
52769 +
52770 + Modification of init scripts will most likely be needed on
52771 + Ubuntu servers with encrypted home directory support enabled,
52772 + as the first non-root user logging in will cause the ecb(aes),
52773 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
52774 +
52775 +config GRKERNSEC_HIDESYM
52776 + bool "Hide kernel symbols"
52777 + help
52778 + If you say Y here, getting information on loaded modules, and
52779 + displaying all kernel symbols through a syscall will be restricted
52780 + to users with CAP_SYS_MODULE. For software compatibility reasons,
52781 + /proc/kallsyms will be restricted to the root user. The RBAC
52782 + system can hide that entry even from root.
52783 +
52784 + This option also prevents leaking of kernel addresses through
52785 + several /proc entries.
52786 +
52787 + Note that this option is only effective provided the following
52788 + conditions are met:
52789 + 1) The kernel using grsecurity is not precompiled by some distribution
52790 + 2) You have also enabled GRKERNSEC_DMESG
52791 + 3) You are using the RBAC system and hiding other files such as your
52792 + kernel image and System.map. Alternatively, enabling this option
52793 + causes the permissions on /boot, /lib/modules, and the kernel
52794 + source directory to change at compile time to prevent
52795 + reading by non-root users.
52796 + If the above conditions are met, this option will aid in providing a
52797 + useful protection against local kernel exploitation of overflows
52798 + and arbitrary read/write vulnerabilities.
52799 +
52800 +config GRKERNSEC_KERN_LOCKOUT
52801 + bool "Active kernel exploit response"
52802 + depends on X86 || ARM || PPC || SPARC32 || SPARC64
52803 + help
52804 + If you say Y here, when a PaX alert is triggered due to suspicious
52805 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
52806 + or an OOPs occurs due to bad memory accesses, instead of just
52807 + terminating the offending process (and potentially allowing
52808 + a subsequent exploit from the same user), we will take one of two
52809 + actions:
52810 + If the user was root, we will panic the system
52811 + If the user was non-root, we will log the attempt, terminate
52812 + all processes owned by the user, then prevent them from creating
52813 + any new processes until the system is restarted
52814 + This deters repeated kernel exploitation/bruteforcing attempts
52815 + and is useful for later forensics.
52816 +
52817 +endmenu
52818 +menu "Role Based Access Control Options"
52819 +depends on GRKERNSEC
52820 +
52821 +config GRKERNSEC_RBAC_DEBUG
52822 + bool
52823 +
52824 +config GRKERNSEC_NO_RBAC
52825 + bool "Disable RBAC system"
52826 + help
52827 + If you say Y here, the /dev/grsec device will be removed from the kernel,
52828 + preventing the RBAC system from being enabled. You should only say Y
52829 + here if you have no intention of using the RBAC system, so as to prevent
52830 + an attacker with root access from misusing the RBAC system to hide files
52831 + and processes when loadable module support and /dev/[k]mem have been
52832 + locked down.
52833 +
52834 +config GRKERNSEC_ACL_HIDEKERN
52835 + bool "Hide kernel processes"
52836 + help
52837 +	  If you say Y here, all kernel threads will be hidden from all
52838 + processes but those whose subject has the "view hidden processes"
52839 + flag.
52840 +
52841 +config GRKERNSEC_ACL_MAXTRIES
52842 + int "Maximum tries before password lockout"
52843 + default 3
52844 + help
52845 + This option enforces the maximum number of times a user can attempt
52846 + to authorize themselves with the grsecurity RBAC system before being
52847 + denied the ability to attempt authorization again for a specified time.
52848 + The lower the number, the harder it will be to brute-force a password.
52849 +
52850 +config GRKERNSEC_ACL_TIMEOUT
52851 + int "Time to wait after max password tries, in seconds"
52852 + default 30
52853 + help
52854 + This option specifies the time the user must wait after attempting to
52855 + authorize to the RBAC system with the maximum number of invalid
52856 + passwords. The higher the number, the harder it will be to brute-force
52857 + a password.
52858 +
52859 +endmenu
52860 +menu "Filesystem Protections"
52861 +depends on GRKERNSEC
52862 +
52863 +config GRKERNSEC_PROC
52864 + bool "Proc restrictions"
52865 + help
52866 + If you say Y here, the permissions of the /proc filesystem
52867 + will be altered to enhance system security and privacy. You MUST
52868 + choose either a user only restriction or a user and group restriction.
52869 +	  Depending upon the option you choose, you can either restrict users to
52870 +	  seeing only the processes they themselves run ("restrict to user only"),
52871 +	  or choose a group that can view all processes and files normally
52872 +	  restricted to root. NOTE: If you're running identd as
52873 + a non-root user, you will have to run it as the group you specify here.
52874 +
52875 +config GRKERNSEC_PROC_USER
52876 + bool "Restrict /proc to user only"
52877 + depends on GRKERNSEC_PROC
52878 + help
52879 + If you say Y here, non-root users will only be able to view their own
52880 +	  processes, and will be restricted from viewing network-related
52881 +	  information and kernel symbol and module information.
52882 +
52883 +config GRKERNSEC_PROC_USERGROUP
52884 + bool "Allow special group"
52885 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
52886 + help
52887 + If you say Y here, you will be able to select a group that will be
52888 + able to view all processes and network-related information. If you've
52889 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
52890 + remain hidden. This option is useful if you want to run identd as
52891 + a non-root user.
52892 +
52893 +config GRKERNSEC_PROC_GID
52894 + int "GID for special group"
52895 + depends on GRKERNSEC_PROC_USERGROUP
52896 + default 1001
52897 +
52898 +config GRKERNSEC_PROC_ADD
52899 + bool "Additional restrictions"
52900 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
52901 + help
52902 + If you say Y here, additional restrictions will be placed on
52903 + /proc that keep normal users from viewing device information and
52904 + slabinfo information that could be useful for exploits.
52905 +
52906 +config GRKERNSEC_LINK
52907 + bool "Linking restrictions"
52908 + help
52909 + If you say Y here, /tmp race exploits will be prevented, since users
52910 + will no longer be able to follow symlinks owned by other users in
52911 + world-writable +t directories (e.g. /tmp), unless the owner of the
52912 +	  symlink is the owner of the directory. Users will also not be
52913 + able to hardlink to files they do not own. If the sysctl option is
52914 + enabled, a sysctl option with name "linking_restrictions" is created.
52915 +
52916 +config GRKERNSEC_FIFO
52917 + bool "FIFO restrictions"
52918 + help
52919 + If you say Y here, users will not be able to write to FIFOs they don't
52920 + own in world-writable +t directories (e.g. /tmp), unless the owner of
52921 +	  the FIFO is also the owner of the directory it's held in. If the sysctl
52922 + option is enabled, a sysctl option with name "fifo_restrictions" is
52923 + created.
52924 +
52925 +config GRKERNSEC_SYSFS_RESTRICT
52926 + bool "Sysfs/debugfs restriction"
52927 + depends on SYSFS
52928 + help
52929 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
52930 + any filesystem normally mounted under it (e.g. debugfs) will only
52931 + be accessible by root. These filesystems generally provide access
52932 + to hardware and debug information that isn't appropriate for unprivileged
52933 + users of the system. Sysfs and debugfs have also become a large source
52934 + of new vulnerabilities, ranging from infoleaks to local compromise.
52935 + There has been very little oversight with an eye toward security involved
52936 + in adding new exporters of information to these filesystems, so their
52937 + use is discouraged.
52938 + This option is equivalent to a chmod 0700 of the mount paths.
52939 +
52940 +config GRKERNSEC_ROFS
52941 + bool "Runtime read-only mount protection"
52942 + help
52943 + If you say Y here, a sysctl option with name "romount_protect" will
52944 + be created. By setting this option to 1 at runtime, filesystems
52945 + will be protected in the following ways:
52946 + * No new writable mounts will be allowed
52947 + * Existing read-only mounts won't be able to be remounted read/write
52948 + * Write operations will be denied on all block devices
52949 + This option acts independently of grsec_lock: once it is set to 1,
52950 + it cannot be turned off. Therefore, please be mindful of the resulting
52951 + behavior if this option is enabled in an init script on a read-only
52952 + filesystem. This feature is mainly intended for secure embedded systems.
52953 +
52954 +config GRKERNSEC_CHROOT
52955 + bool "Chroot jail restrictions"
52956 + help
52957 + If you say Y here, you will be able to choose several options that will
52958 + make breaking out of a chrooted jail much more difficult. If you
52959 + encounter no software incompatibilities with the following options, it
52960 + is recommended that you enable each one.
52961 +
52962 +config GRKERNSEC_CHROOT_MOUNT
52963 + bool "Deny mounts"
52964 + depends on GRKERNSEC_CHROOT
52965 + help
52966 + If you say Y here, processes inside a chroot will not be able to
52967 + mount or remount filesystems. If the sysctl option is enabled, a
52968 + sysctl option with name "chroot_deny_mount" is created.
52969 +
52970 +config GRKERNSEC_CHROOT_DOUBLE
52971 + bool "Deny double-chroots"
52972 + depends on GRKERNSEC_CHROOT
52973 + help
52974 + If you say Y here, processes inside a chroot will not be able to chroot
52975 + again outside the chroot. This is a widely used method of breaking
52976 + out of a chroot jail and should not be allowed. If the sysctl
52977 + option is enabled, a sysctl option with name
52978 + "chroot_deny_chroot" is created.
52979 +
52980 +config GRKERNSEC_CHROOT_PIVOT
52981 + bool "Deny pivot_root in chroot"
52982 + depends on GRKERNSEC_CHROOT
52983 + help
52984 + If you say Y here, processes inside a chroot will not be able to use
52985 + a function called pivot_root() that was introduced in Linux 2.3.41. It
52986 +	  works similarly to chroot in that it changes the root filesystem. This
52987 + function could be misused in a chrooted process to attempt to break out
52988 + of the chroot, and therefore should not be allowed. If the sysctl
52989 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
52990 + created.
52991 +
52992 +config GRKERNSEC_CHROOT_CHDIR
52993 + bool "Enforce chdir(\"/\") on all chroots"
52994 + depends on GRKERNSEC_CHROOT
52995 + help
52996 + If you say Y here, the current working directory of all newly-chrooted
52997 +	  applications will be set to the root directory of the chroot.
52998 + The man page on chroot(2) states:
52999 + Note that this call does not change the current working
53000 + directory, so that `.' can be outside the tree rooted at
53001 + `/'. In particular, the super-user can escape from a
53002 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
53003 +
53004 + It is recommended that you say Y here, since it's not known to break
53005 + any software. If the sysctl option is enabled, a sysctl option with
53006 + name "chroot_enforce_chdir" is created.
53007 +
53008 +config GRKERNSEC_CHROOT_CHMOD
53009 + bool "Deny (f)chmod +s"
53010 + depends on GRKERNSEC_CHROOT
53011 + help
53012 + If you say Y here, processes inside a chroot will not be able to chmod
53013 + or fchmod files to make them have suid or sgid bits. This protects
53014 + against another published method of breaking a chroot. If the sysctl
53015 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
53016 + created.
53017 +
53018 +config GRKERNSEC_CHROOT_FCHDIR
53019 + bool "Deny fchdir out of chroot"
53020 + depends on GRKERNSEC_CHROOT
53021 + help
53022 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
53023 + to a file descriptor of the chrooting process that points to a directory
53024 + outside the filesystem will be stopped. If the sysctl option
53025 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
53026 +
53027 +config GRKERNSEC_CHROOT_MKNOD
53028 + bool "Deny mknod"
53029 + depends on GRKERNSEC_CHROOT
53030 + help
53031 + If you say Y here, processes inside a chroot will not be allowed to
53032 + mknod. The problem with using mknod inside a chroot is that it
53033 + would allow an attacker to create a device entry that is the same
53034 +	  as one on the physical root of your system, which could be anything
53035 +	  from the console device to a device for your hard drive (which
53036 + they could then use to wipe the drive or steal data). It is recommended
53037 + that you say Y here, unless you run into software incompatibilities.
53038 + If the sysctl option is enabled, a sysctl option with name
53039 + "chroot_deny_mknod" is created.
53040 +
53041 +config GRKERNSEC_CHROOT_SHMAT
53042 + bool "Deny shmat() out of chroot"
53043 + depends on GRKERNSEC_CHROOT
53044 + help
53045 + If you say Y here, processes inside a chroot will not be able to attach
53046 + to shared memory segments that were created outside of the chroot jail.
53047 + It is recommended that you say Y here. If the sysctl option is enabled,
53048 + a sysctl option with name "chroot_deny_shmat" is created.
53049 +
53050 +config GRKERNSEC_CHROOT_UNIX
53051 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
53052 + depends on GRKERNSEC_CHROOT
53053 + help
53054 + If you say Y here, processes inside a chroot will not be able to
53055 + connect to abstract (meaning not belonging to a filesystem) Unix
53056 + domain sockets that were bound outside of a chroot. It is recommended
53057 + that you say Y here. If the sysctl option is enabled, a sysctl option
53058 + with name "chroot_deny_unix" is created.
53059 +
53060 +config GRKERNSEC_CHROOT_FINDTASK
53061 + bool "Protect outside processes"
53062 + depends on GRKERNSEC_CHROOT
53063 + help
53064 + If you say Y here, processes inside a chroot will not be able to
53065 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
53066 + getsid, or view any process outside of the chroot. If the sysctl
53067 + option is enabled, a sysctl option with name "chroot_findtask" is
53068 + created.
53069 +
53070 +config GRKERNSEC_CHROOT_NICE
53071 + bool "Restrict priority changes"
53072 + depends on GRKERNSEC_CHROOT
53073 + help
53074 + If you say Y here, processes inside a chroot will not be able to raise
53075 + the priority of processes in the chroot, or alter the priority of
53076 + processes outside the chroot. This provides more security than simply
53077 + removing CAP_SYS_NICE from the process' capability set. If the
53078 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
53079 + is created.
53080 +
53081 +config GRKERNSEC_CHROOT_SYSCTL
53082 + bool "Deny sysctl writes"
53083 + depends on GRKERNSEC_CHROOT
53084 + help
53085 + If you say Y here, an attacker in a chroot will not be able to
53086 + write to sysctl entries, either by sysctl(2) or through a /proc
53087 + interface. It is strongly recommended that you say Y here. If the
53088 + sysctl option is enabled, a sysctl option with name
53089 + "chroot_deny_sysctl" is created.
53090 +
53091 +config GRKERNSEC_CHROOT_CAPS
53092 + bool "Capability restrictions"
53093 + depends on GRKERNSEC_CHROOT
53094 + help
53095 + If you say Y here, the capabilities on all root processes within a
53096 + chroot jail will be lowered to stop module insertion, raw i/o,
53097 + system and net admin tasks, rebooting the system, modifying immutable
53098 + files, modifying IPC owned by another, and changing the system time.
53099 + This is left an option because it can break some apps. Disable this
53100 +	  This is left as an option because it can break some apps. Disable this
53101 + tasks. If the sysctl option is enabled, a sysctl option with
53102 + name "chroot_caps" is created.
53103 +
53104 +endmenu
53105 +menu "Kernel Auditing"
53106 +depends on GRKERNSEC
53107 +
53108 +config GRKERNSEC_AUDIT_GROUP
53109 + bool "Single group for auditing"
53110 + help
53111 + If you say Y here, the exec, chdir, and (un)mount logging features
53112 + will only operate on a group you specify. This option is recommended
53113 + if you only want to watch certain users instead of having a large
53114 + amount of logs from the entire system. If the sysctl option is enabled,
53115 + a sysctl option with name "audit_group" is created.
53116 +
53117 +config GRKERNSEC_AUDIT_GID
53118 + int "GID for auditing"
53119 + depends on GRKERNSEC_AUDIT_GROUP
53120 + default 1007
53121 +
53122 +config GRKERNSEC_EXECLOG
53123 + bool "Exec logging"
53124 + help
53125 + If you say Y here, all execve() calls will be logged (since the
53126 + other exec*() calls are frontends to execve(), all execution
53127 + will be logged). Useful for shell-servers that like to keep track
53128 + of their users. If the sysctl option is enabled, a sysctl option with
53129 + name "exec_logging" is created.
53130 + WARNING: This option when enabled will produce a LOT of logs, especially
53131 + on an active system.
53132 +
53133 +config GRKERNSEC_RESLOG
53134 + bool "Resource logging"
53135 + help
53136 + If you say Y here, all attempts to overstep resource limits will
53137 + be logged with the resource name, the requested size, and the current
53138 + limit. It is highly recommended that you say Y here. If the sysctl
53139 + option is enabled, a sysctl option with name "resource_logging" is
53140 + created. If the RBAC system is enabled, the sysctl value is ignored.
53141 +
53142 +config GRKERNSEC_CHROOT_EXECLOG
53143 + bool "Log execs within chroot"
53144 + help
53145 + If you say Y here, all executions inside a chroot jail will be logged
53146 + to syslog. This can cause a large amount of logs if certain
53147 + applications (eg. djb's daemontools) are installed on the system, and
53148 + is therefore left as an option. If the sysctl option is enabled, a
53149 + sysctl option with name "chroot_execlog" is created.
53150 +
53151 +config GRKERNSEC_AUDIT_PTRACE
53152 + bool "Ptrace logging"
53153 + help
53154 + If you say Y here, all attempts to attach to a process via ptrace
53155 + will be logged. If the sysctl option is enabled, a sysctl option
53156 + with name "audit_ptrace" is created.
53157 +
53158 +config GRKERNSEC_AUDIT_CHDIR
53159 + bool "Chdir logging"
53160 + help
53161 + If you say Y here, all chdir() calls will be logged. If the sysctl
53162 + option is enabled, a sysctl option with name "audit_chdir" is created.
53163 +
53164 +config GRKERNSEC_AUDIT_MOUNT
53165 + bool "(Un)Mount logging"
53166 + help
53167 + If you say Y here, all mounts and unmounts will be logged. If the
53168 + sysctl option is enabled, a sysctl option with name "audit_mount" is
53169 + created.
53170 +
53171 +config GRKERNSEC_SIGNAL
53172 + bool "Signal logging"
53173 + help
53174 + If you say Y here, certain important signals will be logged, such as
53175 +	  SIGSEGV, which will as a result inform you when an error in a program
53176 + occurred, which in some cases could mean a possible exploit attempt.
53177 + If the sysctl option is enabled, a sysctl option with name
53178 + "signal_logging" is created.
53179 +
53180 +config GRKERNSEC_FORKFAIL
53181 + bool "Fork failure logging"
53182 + help
53183 + If you say Y here, all failed fork() attempts will be logged.
53184 + This could suggest a fork bomb, or someone attempting to overstep
53185 + their process limit. If the sysctl option is enabled, a sysctl option
53186 + with name "forkfail_logging" is created.
53187 +
53188 +config GRKERNSEC_TIME
53189 + bool "Time change logging"
53190 + help
53191 + If you say Y here, any changes of the system clock will be logged.
53192 + If the sysctl option is enabled, a sysctl option with name
53193 + "timechange_logging" is created.
53194 +
53195 +config GRKERNSEC_PROC_IPADDR
53196 + bool "/proc/<pid>/ipaddr support"
53197 + help
53198 + If you say Y here, a new entry will be added to each /proc/<pid>
53199 + directory that contains the IP address of the person using the task.
53200 + The IP is carried across local TCP and AF_UNIX stream sockets.
53201 + This information can be useful for IDS/IPSes to perform remote response
53202 + to a local attack. The entry is readable by only the owner of the
53203 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
53204 + the RBAC system), and thus does not create privacy concerns.
53205 +
53206 +config GRKERNSEC_RWXMAP_LOG
53207 + bool 'Denied RWX mmap/mprotect logging'
53208 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
53209 + help
53210 + If you say Y here, calls to mmap() and mprotect() with explicit
53211 + usage of PROT_WRITE and PROT_EXEC together will be logged when
53212 + denied by the PAX_MPROTECT feature. If the sysctl option is
53213 + enabled, a sysctl option with name "rwxmap_logging" is created.
53214 +
53215 +config GRKERNSEC_AUDIT_TEXTREL
53216 + bool 'ELF text relocations logging (READ HELP)'
53217 + depends on PAX_MPROTECT
53218 + help
53219 + If you say Y here, text relocations will be logged with the filename
53220 + of the offending library or binary. The purpose of the feature is
53221 + to help Linux distribution developers get rid of libraries and
53222 + binaries that need text relocations which hinder the future progress
53223 + of PaX. Only Linux distribution developers should say Y here, and
53224 + never on a production machine, as this option creates an information
53225 + leak that could aid an attacker in defeating the randomization of
53226 + a single memory region. If the sysctl option is enabled, a sysctl
53227 + option with name "audit_textrel" is created.
53228 +
53229 +endmenu
53230 +
53231 +menu "Executable Protections"
53232 +depends on GRKERNSEC
53233 +
53234 +config GRKERNSEC_EXECVE
53235 + bool "Enforce RLIMIT_NPROC on execs"
53236 + help
53237 + If you say Y here, users with a resource limit on processes will
53238 + have the value checked during execve() calls. The current system
53239 + only checks the system limit during fork() calls. If the sysctl option
53240 + is enabled, a sysctl option with name "execve_limiting" is created.
53241 +
53242 +config GRKERNSEC_DMESG
53243 + bool "Dmesg(8) restriction"
53244 + help
53245 + If you say Y here, non-root users will not be able to use dmesg(8)
53246 + to view up to the last 4kb of messages in the kernel's log buffer.
53247 + The kernel's log buffer often contains kernel addresses and other
53248 + identifying information useful to an attacker in fingerprinting a
53249 + system for a targeted exploit.
53250 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
53251 + created.
53252 +
53253 +config GRKERNSEC_HARDEN_PTRACE
53254 + bool "Deter ptrace-based process snooping"
53255 + help
53256 + If you say Y here, TTY sniffers and other malicious monitoring
53257 + programs implemented through ptrace will be defeated. If you
53258 + have been using the RBAC system, this option has already been
53259 + enabled for several years for all users, with the ability to make
53260 + fine-grained exceptions.
53261 +
53262 + This option only affects the ability of non-root users to ptrace
53263 + processes that are not a descendent of the ptracing process.
53264 + This means that strace ./binary and gdb ./binary will still work,
53265 + but attaching to arbitrary processes will not. If the sysctl
53266 + option is enabled, a sysctl option with name "harden_ptrace" is
53267 + created.
53268 +
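Concretely, "attaching to arbitrary processes" means the single ptrace() call sketched below; with this option, a non-root user issuing it against a process that is not a descendant is refused (typically surfacing as EPERM). The target PID argument is for illustration only.

#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/wait.h>

int main(int argc, char **argv)
{
	pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : 1;

	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0) {
		perror("PTRACE_ATTACH");	/* a denied attach shows up here */
		return 1;
	}
	waitpid(pid, NULL, 0);			/* wait for the tracee to stop */
	ptrace(PTRACE_DETACH, pid, NULL, NULL);
	puts("attached and detached");
	return 0;
}
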
53269 +config GRKERNSEC_TPE
53270 + bool "Trusted Path Execution (TPE)"
53271 + help
53272 + If you say Y here, you will be able to choose a gid to add to the
53273 + supplementary groups of users you want to mark as "untrusted."
53274 + These users will not be able to execute any files that are not in
53275 + root-owned directories writable only by root. If the sysctl option
53276 + is enabled, a sysctl option with name "tpe" is created.
53277 +
53278 +config GRKERNSEC_TPE_ALL
53279 + bool "Partially restrict all non-root users"
53280 + depends on GRKERNSEC_TPE
53281 + help
53282 + If you say Y here, all non-root users will be covered under
53283 + a weaker TPE restriction. This is separate from, and in addition to,
53284 + the main TPE options that you have selected elsewhere. Thus, if a
53285 + "trusted" GID is chosen, this restriction applies to even that GID.
53286 + Under this restriction, all non-root users will only be allowed to
53287 + execute files in directories they own that are not group or
53288 + world-writable, or in directories owned by root and writable only by
53289 + root. If the sysctl option is enabled, a sysctl option with name
53290 + "tpe_restrict_all" is created.
53291 +
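The directory test described above can be paraphrased in a few lines of user-space C; this is only an illustration of the rule, not the kernel code, and tpe_dir_trusted is a made-up name.

#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* Trusted under the TPE_ALL rule: owned by root or by the caller, and
 * neither group- nor world-writable. */
static int tpe_dir_trusted(const char *dir)
{
	struct stat st;

	if (stat(dir, &st) < 0 || !S_ISDIR(st.st_mode))
		return 0;
	if (st.st_mode & (S_IWGRP | S_IWOTH))
		return 0;
	return st.st_uid == 0 || st.st_uid == getuid();
}

int main(int argc, char **argv)
{
	const char *dir = argc > 1 ? argv[1] : "/usr/bin";

	printf("%s is %strusted\n", dir, tpe_dir_trusted(dir) ? "" : "NOT ");
	return 0;
}
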
53292 +config GRKERNSEC_TPE_INVERT
53293 + bool "Invert GID option"
53294 + depends on GRKERNSEC_TPE
53295 + help
53296 + If you say Y here, the group you specify in the TPE configuration will
53297 + decide what group TPE restrictions will be *disabled* for. This
53298 + option is useful if you want TPE restrictions to be applied to most
53299 + users on the system. If the sysctl option is enabled, a sysctl option
53300 + with name "tpe_invert" is created. Unlike other sysctl options, this
53301 + entry will default to on for backward-compatibility.
53302 +
53303 +config GRKERNSEC_TPE_GID
53304 + int "GID for untrusted users"
53305 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
53306 + default 1005
53307 + help
53308 + Setting this GID determines what group TPE restrictions will be
53309 + *enabled* for. If the sysctl option is enabled, a sysctl option
53310 + with name "tpe_gid" is created.
53311 +
53312 +config GRKERNSEC_TPE_GID
53313 + int "GID for trusted users"
53314 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
53315 + default 1005
53316 + help
53317 + Setting this GID determines what group TPE restrictions will be
53318 + *disabled* for. If the sysctl option is enabled, a sysctl option
53319 + with name "tpe_gid" is created.
53320 +
53321 +endmenu
53322 +menu "Network Protections"
53323 +depends on GRKERNSEC
53324 +
53325 +config GRKERNSEC_RANDNET
53326 + bool "Larger entropy pools"
53327 + help
53328 + If you say Y here, the entropy pools used for many features of Linux
53329 + and grsecurity will be doubled in size. Since several grsecurity
53330 + features use additional randomness, it is recommended that you say Y
53331 + here. Saying Y here has a similar effect as modifying
53332 + /proc/sys/kernel/random/poolsize.
53333 +
53334 +config GRKERNSEC_BLACKHOLE
53335 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
53336 + help
53337 + If you say Y here, neither TCP resets nor ICMP
53338 + destination-unreachable packets will be sent in response to packets
53339 + sent to ports for which no associated listening process exists.
53340 + This feature supports both IPV4 and IPV6 and exempts the
53341 + loopback interface from blackholing. Enabling this feature
53342 + makes a host more resilient to DoS attacks and reduces network
53343 + visibility against scanners.
53344 +
53345 + The blackhole feature as-implemented is equivalent to the FreeBSD
53346 + blackhole feature, as it prevents RST responses to all packets, not
53347 + just SYNs. Under most application behavior this causes no
53348 + problems, but applications (like haproxy) may not close certain
53349 + connections in a way that cleanly terminates them on the remote
53350 + end, leaving the remote host in LAST_ACK state. Because of this
53351 + side-effect and to prevent intentional LAST_ACK DoSes, this
53352 + feature also adds automatic mitigation against such attacks.
53353 + The mitigation drastically reduces the amount of time a socket
53354 + can spend in LAST_ACK state. If you're using haproxy and not
53355 + all servers it connects to have this option enabled, consider
53356 + disabling this feature on the haproxy host.
53357 +
53358 + If the sysctl option is enabled, two sysctl options with names
53359 + "ip_blackhole" and "lastack_retries" will be created.
53360 + While "ip_blackhole" takes the standard zero/non-zero on/off
53361 + toggle, "lastack_retries" uses the same kinds of values as
53362 + "tcp_retries1" and "tcp_retries2". The default value of 4
53363 + prevents a socket from lasting more than 45 seconds in LAST_ACK
53364 + state.
53365 +
53366 +config GRKERNSEC_SOCKET
53367 + bool "Socket restrictions"
53368 + help
53369 + If you say Y here, you will be able to choose from several options.
53370 + If you assign a GID on your system and add it to the supplementary
53371 + groups of users you want to restrict socket access to, this patch
53372 + will perform up to three things, based on the option(s) you choose.
53373 +
53374 +config GRKERNSEC_SOCKET_ALL
53375 + bool "Deny any sockets to group"
53376 + depends on GRKERNSEC_SOCKET
53377 + help
53378 +	  If you say Y here, you will be able to choose a GID whose users will
53379 + be unable to connect to other hosts from your machine or run server
53380 + applications from your machine. If the sysctl option is enabled, a
53381 + sysctl option with name "socket_all" is created.
53382 +
53383 +config GRKERNSEC_SOCKET_ALL_GID
53384 + int "GID to deny all sockets for"
53385 + depends on GRKERNSEC_SOCKET_ALL
53386 + default 1004
53387 + help
53388 + Here you can choose the GID to disable socket access for. Remember to
53389 + add the users you want socket access disabled for to the GID
53390 + specified here. If the sysctl option is enabled, a sysctl option
53391 + with name "socket_all_gid" is created.
53392 +
53393 +config GRKERNSEC_SOCKET_CLIENT
53394 + bool "Deny client sockets to group"
53395 + depends on GRKERNSEC_SOCKET
53396 + help
53397 +	  If you say Y here, you will be able to choose a GID whose users will
53398 + be unable to connect to other hosts from your machine, but will be
53399 + able to run servers. If this option is enabled, all users in the group
53400 + you specify will have to use passive mode when initiating ftp transfers
53401 + from the shell on your machine. If the sysctl option is enabled, a
53402 + sysctl option with name "socket_client" is created.
53403 +
53404 +config GRKERNSEC_SOCKET_CLIENT_GID
53405 + int "GID to deny client sockets for"
53406 + depends on GRKERNSEC_SOCKET_CLIENT
53407 + default 1003
53408 + help
53409 + Here you can choose the GID to disable client socket access for.
53410 + Remember to add the users you want client socket access disabled for to
53411 + the GID specified here. If the sysctl option is enabled, a sysctl
53412 + option with name "socket_client_gid" is created.
53413 +
53414 +config GRKERNSEC_SOCKET_SERVER
53415 + bool "Deny server sockets to group"
53416 + depends on GRKERNSEC_SOCKET
53417 + help
53418 +	  If you say Y here, you will be able to choose a GID whose users will
53419 + be unable to run server applications from your machine. If the sysctl
53420 + option is enabled, a sysctl option with name "socket_server" is created.
53421 +
53422 +config GRKERNSEC_SOCKET_SERVER_GID
53423 + int "GID to deny server sockets for"
53424 + depends on GRKERNSEC_SOCKET_SERVER
53425 + default 1002
53426 + help
53427 + Here you can choose the GID to disable server socket access for.
53428 + Remember to add the users you want server socket access disabled for to
53429 + the GID specified here. If the sysctl option is enabled, a sysctl
53430 + option with name "socket_server_gid" is created.
53431 +
53432 +endmenu
53433 +menu "Sysctl support"
53434 +depends on GRKERNSEC && SYSCTL
53435 +
53436 +config GRKERNSEC_SYSCTL
53437 + bool "Sysctl support"
53438 + help
53439 + If you say Y here, you will be able to change the options that
53440 + grsecurity runs with at bootup, without having to recompile your
53441 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
53442 + to enable (1) or disable (0) various features. All the sysctl entries
53443 + are mutable until the "grsec_lock" entry is set to a non-zero value.
53444 + All features enabled in the kernel configuration are disabled at boot
53445 + if you do not say Y to the "Turn on features by default" option.
53446 + All options should be set at startup, and the grsec_lock entry should
53447 + be set to a non-zero value after all the options are set.
53448 + *THIS IS EXTREMELY IMPORTANT*
53449 +
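As a sketch of the workflow the help text describes (set the entries you want, then lock them), the small program below writes to /proc/sys/kernel/grsecurity; "chroot_deny_sysctl" is just one example entry name taken from this menu, and the program assumes sysctl support was compiled in and that it runs as root early in boot.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int grsec_sysctl_write(const char *entry, const char *val)
{
	char path[256];
	int fd;
	ssize_t n;

	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", entry);
	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror(path);
		return -1;
	}
	n = write(fd, val, strlen(val));
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	grsec_sysctl_write("chroot_deny_sysctl", "1");	/* enable a feature */
	grsec_sysctl_write("grsec_lock", "1");		/* then make all entries immutable */
	return 0;
}
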
53450 +config GRKERNSEC_SYSCTL_DISTRO
53451 + bool "Extra sysctl support for distro makers (READ HELP)"
53452 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
53453 + help
53454 + If you say Y here, additional sysctl options will be created
53455 + for features that affect processes running as root. Therefore,
53456 + it is critical when using this option that the grsec_lock entry be
53457 +	  enabled after boot. Only distros that ship prebuilt kernel packages
53458 +	  with this option enabled and that can ensure grsec_lock is enabled
53459 +	  after boot should use this option.
53460 + *Failure to set grsec_lock after boot makes all grsec features
53461 + this option covers useless*
53462 +
53463 + Currently this option creates the following sysctl entries:
53464 + "Disable Privileged I/O": "disable_priv_io"
53465 +
53466 +config GRKERNSEC_SYSCTL_ON
53467 + bool "Turn on features by default"
53468 + depends on GRKERNSEC_SYSCTL
53469 + help
53470 + If you say Y here, instead of having all features enabled in the
53471 + kernel configuration disabled at boot time, the features will be
53472 + enabled at boot time. It is recommended you say Y here unless
53473 + there is some reason you would want all sysctl-tunable features to
53474 + be disabled by default. As mentioned elsewhere, it is important
53475 + to enable the grsec_lock entry once you have finished modifying
53476 + the sysctl entries.
53477 +
53478 +endmenu
53479 +menu "Logging Options"
53480 +depends on GRKERNSEC
53481 +
53482 +config GRKERNSEC_FLOODTIME
53483 + int "Seconds in between log messages (minimum)"
53484 + default 10
53485 + help
53486 + This option allows you to enforce the number of seconds between
53487 + grsecurity log messages. The default should be suitable for most
53488 + people, however, if you choose to change it, choose a value small enough
53489 + to allow informative logs to be produced, but large enough to
53490 + prevent flooding.
53491 +
53492 +config GRKERNSEC_FLOODBURST
53493 + int "Number of messages in a burst (maximum)"
53494 + default 4
53495 + help
53496 + This option allows you to choose the maximum number of messages allowed
53497 + within the flood time interval you chose in a separate option. The
53498 + default should be suitable for most people, however if you find that
53499 + many of your logs are being interpreted as flooding, you may want to
53500 + raise this value.
53501 +
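The two options interact like a simple windowed rate limiter: at most FLOODBURST messages are emitted per FLOODTIME seconds. The toy below mimics that interaction with the defaults of 10 seconds and 4 messages; it is not the kernel's ratelimit implementation.

#include <stdio.h>
#include <time.h>

static int grsec_ratelimit(int floodtime, int burst)
{
	static time_t window_start;
	static int sent;
	time_t now = time(NULL);

	if (now - window_start >= floodtime) {	/* new window, reset the counter */
		window_start = now;
		sent = 0;
	}
	if (sent >= burst)
		return 0;			/* suppressed */
	sent++;
	return 1;				/* logged */
}

int main(void)
{
	for (int i = 0; i < 8; i++)
		printf("message %d: %s\n", i,
		       grsec_ratelimit(10, 4) ? "logged" : "suppressed");
	return 0;
}
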
53502 +endmenu
53503 +
53504 +endmenu
53505 diff -urNp linux-2.6.32.42/grsecurity/Makefile linux-2.6.32.42/grsecurity/Makefile
53506 --- linux-2.6.32.42/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
53507 +++ linux-2.6.32.42/grsecurity/Makefile 2011-05-24 20:27:46.000000000 -0400
53508 @@ -0,0 +1,33 @@
53509 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
53510 +# during 2001-2009 it has been completely redesigned by Brad Spengler
53511 +# into an RBAC system
53512 +#
53513 +# All code in this directory and various hooks inserted throughout the kernel
53514 +# are copyright Brad Spengler - Open Source Security, Inc., and released
53515 +# under the GPL v2 or higher
53516 +
53517 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
53518 + grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
53519 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
53520 +
53521 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
53522 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
53523 + gracl_learn.o grsec_log.o
53524 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
53525 +
53526 +ifdef CONFIG_NET
53527 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
53528 +endif
53529 +
53530 +ifndef CONFIG_GRKERNSEC
53531 +obj-y += grsec_disabled.o
53532 +endif
53533 +
53534 +ifdef CONFIG_GRKERNSEC_HIDESYM
53535 +extra-y := grsec_hidesym.o
53536 +$(obj)/grsec_hidesym.o:
53537 + @-chmod -f 500 /boot
53538 + @-chmod -f 500 /lib/modules
53539 + @-chmod -f 700 .
53540 + @echo ' grsec: protected kernel image paths'
53541 +endif
53542 diff -urNp linux-2.6.32.42/include/acpi/acpi_drivers.h linux-2.6.32.42/include/acpi/acpi_drivers.h
53543 --- linux-2.6.32.42/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
53544 +++ linux-2.6.32.42/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
53545 @@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
53546 Dock Station
53547 -------------------------------------------------------------------------- */
53548 struct acpi_dock_ops {
53549 - acpi_notify_handler handler;
53550 - acpi_notify_handler uevent;
53551 + const acpi_notify_handler handler;
53552 + const acpi_notify_handler uevent;
53553 };
53554
53555 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
53556 @@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
53557 extern int register_dock_notifier(struct notifier_block *nb);
53558 extern void unregister_dock_notifier(struct notifier_block *nb);
53559 extern int register_hotplug_dock_device(acpi_handle handle,
53560 - struct acpi_dock_ops *ops,
53561 + const struct acpi_dock_ops *ops,
53562 void *context);
53563 extern void unregister_hotplug_dock_device(acpi_handle handle);
53564 #else
53565 @@ -144,7 +144,7 @@ static inline void unregister_dock_notif
53566 {
53567 }
53568 static inline int register_hotplug_dock_device(acpi_handle handle,
53569 - struct acpi_dock_ops *ops,
53570 + const struct acpi_dock_ops *ops,
53571 void *context)
53572 {
53573 return -ENODEV;
53574 diff -urNp linux-2.6.32.42/include/asm-generic/atomic-long.h linux-2.6.32.42/include/asm-generic/atomic-long.h
53575 --- linux-2.6.32.42/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
53576 +++ linux-2.6.32.42/include/asm-generic/atomic-long.h 2011-05-16 21:46:57.000000000 -0400
53577 @@ -22,6 +22,12 @@
53578
53579 typedef atomic64_t atomic_long_t;
53580
53581 +#ifdef CONFIG_PAX_REFCOUNT
53582 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
53583 +#else
53584 +typedef atomic64_t atomic_long_unchecked_t;
53585 +#endif
53586 +
53587 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
53588
53589 static inline long atomic_long_read(atomic_long_t *l)
53590 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
53591 return (long)atomic64_read(v);
53592 }
53593
53594 +#ifdef CONFIG_PAX_REFCOUNT
53595 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53596 +{
53597 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53598 +
53599 + return (long)atomic64_read_unchecked(v);
53600 +}
53601 +#endif
53602 +
53603 static inline void atomic_long_set(atomic_long_t *l, long i)
53604 {
53605 atomic64_t *v = (atomic64_t *)l;
53606 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
53607 atomic64_set(v, i);
53608 }
53609
53610 +#ifdef CONFIG_PAX_REFCOUNT
53611 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53612 +{
53613 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53614 +
53615 + atomic64_set_unchecked(v, i);
53616 +}
53617 +#endif
53618 +
53619 static inline void atomic_long_inc(atomic_long_t *l)
53620 {
53621 atomic64_t *v = (atomic64_t *)l;
53622 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
53623 atomic64_inc(v);
53624 }
53625
53626 +#ifdef CONFIG_PAX_REFCOUNT
53627 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53628 +{
53629 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53630 +
53631 + atomic64_inc_unchecked(v);
53632 +}
53633 +#endif
53634 +
53635 static inline void atomic_long_dec(atomic_long_t *l)
53636 {
53637 atomic64_t *v = (atomic64_t *)l;
53638 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
53639 atomic64_dec(v);
53640 }
53641
53642 +#ifdef CONFIG_PAX_REFCOUNT
53643 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53644 +{
53645 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53646 +
53647 + atomic64_dec_unchecked(v);
53648 +}
53649 +#endif
53650 +
53651 static inline void atomic_long_add(long i, atomic_long_t *l)
53652 {
53653 atomic64_t *v = (atomic64_t *)l;
53654 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
53655 atomic64_add(i, v);
53656 }
53657
53658 +#ifdef CONFIG_PAX_REFCOUNT
53659 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53660 +{
53661 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53662 +
53663 + atomic64_add_unchecked(i, v);
53664 +}
53665 +#endif
53666 +
53667 static inline void atomic_long_sub(long i, atomic_long_t *l)
53668 {
53669 atomic64_t *v = (atomic64_t *)l;
53670 @@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
53671 return (long)atomic64_inc_return(v);
53672 }
53673
53674 +#ifdef CONFIG_PAX_REFCOUNT
53675 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53676 +{
53677 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
53678 +
53679 + return (long)atomic64_inc_return_unchecked(v);
53680 +}
53681 +#endif
53682 +
53683 static inline long atomic_long_dec_return(atomic_long_t *l)
53684 {
53685 atomic64_t *v = (atomic64_t *)l;
53686 @@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
53687
53688 typedef atomic_t atomic_long_t;
53689
53690 +#ifdef CONFIG_PAX_REFCOUNT
53691 +typedef atomic_unchecked_t atomic_long_unchecked_t;
53692 +#else
53693 +typedef atomic_t atomic_long_unchecked_t;
53694 +#endif
53695 +
53696 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
53697 static inline long atomic_long_read(atomic_long_t *l)
53698 {
53699 @@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
53700 return (long)atomic_read(v);
53701 }
53702
53703 +#ifdef CONFIG_PAX_REFCOUNT
53704 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
53705 +{
53706 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53707 +
53708 + return (long)atomic_read_unchecked(v);
53709 +}
53710 +#endif
53711 +
53712 static inline void atomic_long_set(atomic_long_t *l, long i)
53713 {
53714 atomic_t *v = (atomic_t *)l;
53715 @@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
53716 atomic_set(v, i);
53717 }
53718
53719 +#ifdef CONFIG_PAX_REFCOUNT
53720 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
53721 +{
53722 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53723 +
53724 + atomic_set_unchecked(v, i);
53725 +}
53726 +#endif
53727 +
53728 static inline void atomic_long_inc(atomic_long_t *l)
53729 {
53730 atomic_t *v = (atomic_t *)l;
53731 @@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
53732 atomic_inc(v);
53733 }
53734
53735 +#ifdef CONFIG_PAX_REFCOUNT
53736 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
53737 +{
53738 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53739 +
53740 + atomic_inc_unchecked(v);
53741 +}
53742 +#endif
53743 +
53744 static inline void atomic_long_dec(atomic_long_t *l)
53745 {
53746 atomic_t *v = (atomic_t *)l;
53747 @@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
53748 atomic_dec(v);
53749 }
53750
53751 +#ifdef CONFIG_PAX_REFCOUNT
53752 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
53753 +{
53754 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53755 +
53756 + atomic_dec_unchecked(v);
53757 +}
53758 +#endif
53759 +
53760 static inline void atomic_long_add(long i, atomic_long_t *l)
53761 {
53762 atomic_t *v = (atomic_t *)l;
53763 @@ -176,6 +278,15 @@ static inline void atomic_long_add(long
53764 atomic_add(i, v);
53765 }
53766
53767 +#ifdef CONFIG_PAX_REFCOUNT
53768 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
53769 +{
53770 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53771 +
53772 + atomic_add_unchecked(i, v);
53773 +}
53774 +#endif
53775 +
53776 static inline void atomic_long_sub(long i, atomic_long_t *l)
53777 {
53778 atomic_t *v = (atomic_t *)l;
53779 @@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
53780 return (long)atomic_inc_return(v);
53781 }
53782
53783 +#ifdef CONFIG_PAX_REFCOUNT
53784 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
53785 +{
53786 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
53787 +
53788 + return (long)atomic_inc_return_unchecked(v);
53789 +}
53790 +#endif
53791 +
53792 static inline long atomic_long_dec_return(atomic_long_t *l)
53793 {
53794 atomic_t *v = (atomic_t *)l;
53795 @@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
53796
53797 #endif /* BITS_PER_LONG == 64 */
53798
53799 +#ifdef CONFIG_PAX_REFCOUNT
53800 +static inline void pax_refcount_needs_these_functions(void)
53801 +{
53802 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
53803 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
53804 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
53805 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
53806 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
53807 + atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
53808 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
53809 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
53810 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
53811 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
53812 + atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
53813 +
53814 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
53815 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
53816 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
53817 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
53818 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
53819 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
53820 +}
53821 +#else
53822 +#define atomic_read_unchecked(v) atomic_read(v)
53823 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
53824 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
53825 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
53826 +#define atomic_inc_unchecked(v) atomic_inc(v)
53827 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
53828 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
53829 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
53830 +#define atomic_dec_unchecked(v) atomic_dec(v)
53831 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
53832 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
53833 +
53834 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
53835 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
53836 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
53837 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
53838 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
53839 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
53840 +#endif
53841 +
53842 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
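The shape of the change above, stripped to its essentials: when the feature is off, the *_unchecked type and helpers simply alias the checked ones, so callers can be converted unconditionally. DEMO_REFCOUNT stands in for CONFIG_PAX_REFCOUNT, the demo_* names are invented, and no real atomicity is involved in this sketch.

#include <stdio.h>

typedef struct { long counter; } demo_atomic_long_t;

#ifdef DEMO_REFCOUNT
typedef struct { long counter; } demo_atomic_long_unchecked_t;
static inline void demo_inc_unchecked(demo_atomic_long_unchecked_t *v) { v->counter++; }
#else
typedef demo_atomic_long_t demo_atomic_long_unchecked_t;
static inline void demo_inc(demo_atomic_long_t *v) { v->counter++; }
#define demo_inc_unchecked(v) demo_inc(v)
#endif

int main(void)
{
	demo_atomic_long_unchecked_t count = { 0 };

	demo_inc_unchecked(&count);	/* same call whether the feature is on or off */
	printf("%ld\n", count.counter);
	return 0;
}
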
53843 diff -urNp linux-2.6.32.42/include/asm-generic/cache.h linux-2.6.32.42/include/asm-generic/cache.h
53844 --- linux-2.6.32.42/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
53845 +++ linux-2.6.32.42/include/asm-generic/cache.h 2011-05-04 17:56:28.000000000 -0400
53846 @@ -6,7 +6,7 @@
53847 * cache lines need to provide their own cache.h.
53848 */
53849
53850 -#define L1_CACHE_SHIFT 5
53851 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
53852 +#define L1_CACHE_SHIFT 5U
53853 +#define L1_CACHE_BYTES (1U << L1_CACHE_SHIFT)
53854
53855 #endif /* __ASM_GENERIC_CACHE_H */
53856 diff -urNp linux-2.6.32.42/include/asm-generic/dma-mapping-common.h linux-2.6.32.42/include/asm-generic/dma-mapping-common.h
53857 --- linux-2.6.32.42/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
53858 +++ linux-2.6.32.42/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
53859 @@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
53860 enum dma_data_direction dir,
53861 struct dma_attrs *attrs)
53862 {
53863 - struct dma_map_ops *ops = get_dma_ops(dev);
53864 + const struct dma_map_ops *ops = get_dma_ops(dev);
53865 dma_addr_t addr;
53866
53867 kmemcheck_mark_initialized(ptr, size);
53868 @@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
53869 enum dma_data_direction dir,
53870 struct dma_attrs *attrs)
53871 {
53872 - struct dma_map_ops *ops = get_dma_ops(dev);
53873 + const struct dma_map_ops *ops = get_dma_ops(dev);
53874
53875 BUG_ON(!valid_dma_direction(dir));
53876 if (ops->unmap_page)
53877 @@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
53878 int nents, enum dma_data_direction dir,
53879 struct dma_attrs *attrs)
53880 {
53881 - struct dma_map_ops *ops = get_dma_ops(dev);
53882 + const struct dma_map_ops *ops = get_dma_ops(dev);
53883 int i, ents;
53884 struct scatterlist *s;
53885
53886 @@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
53887 int nents, enum dma_data_direction dir,
53888 struct dma_attrs *attrs)
53889 {
53890 - struct dma_map_ops *ops = get_dma_ops(dev);
53891 + const struct dma_map_ops *ops = get_dma_ops(dev);
53892
53893 BUG_ON(!valid_dma_direction(dir));
53894 debug_dma_unmap_sg(dev, sg, nents, dir);
53895 @@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
53896 size_t offset, size_t size,
53897 enum dma_data_direction dir)
53898 {
53899 - struct dma_map_ops *ops = get_dma_ops(dev);
53900 + const struct dma_map_ops *ops = get_dma_ops(dev);
53901 dma_addr_t addr;
53902
53903 kmemcheck_mark_initialized(page_address(page) + offset, size);
53904 @@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
53905 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
53906 size_t size, enum dma_data_direction dir)
53907 {
53908 - struct dma_map_ops *ops = get_dma_ops(dev);
53909 + const struct dma_map_ops *ops = get_dma_ops(dev);
53910
53911 BUG_ON(!valid_dma_direction(dir));
53912 if (ops->unmap_page)
53913 @@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
53914 size_t size,
53915 enum dma_data_direction dir)
53916 {
53917 - struct dma_map_ops *ops = get_dma_ops(dev);
53918 + const struct dma_map_ops *ops = get_dma_ops(dev);
53919
53920 BUG_ON(!valid_dma_direction(dir));
53921 if (ops->sync_single_for_cpu)
53922 @@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
53923 dma_addr_t addr, size_t size,
53924 enum dma_data_direction dir)
53925 {
53926 - struct dma_map_ops *ops = get_dma_ops(dev);
53927 + const struct dma_map_ops *ops = get_dma_ops(dev);
53928
53929 BUG_ON(!valid_dma_direction(dir));
53930 if (ops->sync_single_for_device)
53931 @@ -123,7 +123,7 @@ static inline void dma_sync_single_range
53932 size_t size,
53933 enum dma_data_direction dir)
53934 {
53935 - struct dma_map_ops *ops = get_dma_ops(dev);
53936 + const struct dma_map_ops *ops = get_dma_ops(dev);
53937
53938 BUG_ON(!valid_dma_direction(dir));
53939 if (ops->sync_single_range_for_cpu) {
53940 @@ -140,7 +140,7 @@ static inline void dma_sync_single_range
53941 size_t size,
53942 enum dma_data_direction dir)
53943 {
53944 - struct dma_map_ops *ops = get_dma_ops(dev);
53945 + const struct dma_map_ops *ops = get_dma_ops(dev);
53946
53947 BUG_ON(!valid_dma_direction(dir));
53948 if (ops->sync_single_range_for_device) {
53949 @@ -155,7 +155,7 @@ static inline void
53950 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
53951 int nelems, enum dma_data_direction dir)
53952 {
53953 - struct dma_map_ops *ops = get_dma_ops(dev);
53954 + const struct dma_map_ops *ops = get_dma_ops(dev);
53955
53956 BUG_ON(!valid_dma_direction(dir));
53957 if (ops->sync_sg_for_cpu)
53958 @@ -167,7 +167,7 @@ static inline void
53959 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
53960 int nelems, enum dma_data_direction dir)
53961 {
53962 - struct dma_map_ops *ops = get_dma_ops(dev);
53963 + const struct dma_map_ops *ops = get_dma_ops(dev);
53964
53965 BUG_ON(!valid_dma_direction(dir));
53966 if (ops->sync_sg_for_device)
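The constification running through this hunk (and many of the following ones) boils down to the pattern below: once the ops table is reached through a pointer to const, any attempt to overwrite its function pointers is rejected at compile time. demo_ops and demo_read are invented names for illustration.

#include <stdio.h>

struct demo_ops {
	int (*read)(void);
};

static int demo_read(void)
{
	return 42;
}

static const struct demo_ops ops = { .read = demo_read };

int main(void)
{
	const struct demo_ops *p = &ops;

	/* p->read = NULL; */		/* would not compile: *p is const */
	printf("%d\n", p->read());
	return 0;
}
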
53967 diff -urNp linux-2.6.32.42/include/asm-generic/futex.h linux-2.6.32.42/include/asm-generic/futex.h
53968 --- linux-2.6.32.42/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
53969 +++ linux-2.6.32.42/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
53970 @@ -6,7 +6,7 @@
53971 #include <asm/errno.h>
53972
53973 static inline int
53974 -futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
53975 +futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
53976 {
53977 int op = (encoded_op >> 28) & 7;
53978 int cmp = (encoded_op >> 24) & 15;
53979 @@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
53980 }
53981
53982 static inline int
53983 -futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
53984 +futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
53985 {
53986 return -ENOSYS;
53987 }
53988 diff -urNp linux-2.6.32.42/include/asm-generic/int-l64.h linux-2.6.32.42/include/asm-generic/int-l64.h
53989 --- linux-2.6.32.42/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
53990 +++ linux-2.6.32.42/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
53991 @@ -46,6 +46,8 @@ typedef unsigned int u32;
53992 typedef signed long s64;
53993 typedef unsigned long u64;
53994
53995 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
53996 +
53997 #define S8_C(x) x
53998 #define U8_C(x) x ## U
53999 #define S16_C(x) x
54000 diff -urNp linux-2.6.32.42/include/asm-generic/int-ll64.h linux-2.6.32.42/include/asm-generic/int-ll64.h
54001 --- linux-2.6.32.42/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
54002 +++ linux-2.6.32.42/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
54003 @@ -51,6 +51,8 @@ typedef unsigned int u32;
54004 typedef signed long long s64;
54005 typedef unsigned long long u64;
54006
54007 +typedef unsigned long long intoverflow_t;
54008 +
54009 #define S8_C(x) x
54010 #define U8_C(x) x ## U
54011 #define S16_C(x) x
54012 diff -urNp linux-2.6.32.42/include/asm-generic/kmap_types.h linux-2.6.32.42/include/asm-generic/kmap_types.h
54013 --- linux-2.6.32.42/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
54014 +++ linux-2.6.32.42/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
54015 @@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
54016 KMAP_D(16) KM_IRQ_PTE,
54017 KMAP_D(17) KM_NMI,
54018 KMAP_D(18) KM_NMI_PTE,
54019 -KMAP_D(19) KM_TYPE_NR
54020 +KMAP_D(19) KM_CLEARPAGE,
54021 +KMAP_D(20) KM_TYPE_NR
54022 };
54023
54024 #undef KMAP_D
54025 diff -urNp linux-2.6.32.42/include/asm-generic/pgtable.h linux-2.6.32.42/include/asm-generic/pgtable.h
54026 --- linux-2.6.32.42/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
54027 +++ linux-2.6.32.42/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
54028 @@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
54029 unsigned long size);
54030 #endif
54031
54032 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
54033 +static inline unsigned long pax_open_kernel(void) { return 0; }
54034 +#endif
54035 +
54036 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
54037 +static inline unsigned long pax_close_kernel(void) { return 0; }
54038 +#endif
54039 +
54040 #endif /* !__ASSEMBLY__ */
54041
54042 #endif /* _ASM_GENERIC_PGTABLE_H */
54043 diff -urNp linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h
54044 --- linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
54045 +++ linux-2.6.32.42/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
54046 @@ -1,14 +1,19 @@
54047 #ifndef _PGTABLE_NOPMD_H
54048 #define _PGTABLE_NOPMD_H
54049
54050 -#ifndef __ASSEMBLY__
54051 -
54052 #include <asm-generic/pgtable-nopud.h>
54053
54054 -struct mm_struct;
54055 -
54056 #define __PAGETABLE_PMD_FOLDED
54057
54058 +#define PMD_SHIFT PUD_SHIFT
54059 +#define PTRS_PER_PMD 1
54060 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
54061 +#define PMD_MASK (~(PMD_SIZE-1))
54062 +
54063 +#ifndef __ASSEMBLY__
54064 +
54065 +struct mm_struct;
54066 +
54067 /*
54068 * Having the pmd type consist of a pud gets the size right, and allows
54069 * us to conceptually access the pud entry that this pmd is folded into
54070 @@ -16,11 +21,6 @@ struct mm_struct;
54071 */
54072 typedef struct { pud_t pud; } pmd_t;
54073
54074 -#define PMD_SHIFT PUD_SHIFT
54075 -#define PTRS_PER_PMD 1
54076 -#define PMD_SIZE (1UL << PMD_SHIFT)
54077 -#define PMD_MASK (~(PMD_SIZE-1))
54078 -
54079 /*
54080 * The "pud_xxx()" functions here are trivial for a folded two-level
54081 * setup: the pmd is never bad, and a pmd always exists (as it's folded
54082 diff -urNp linux-2.6.32.42/include/asm-generic/pgtable-nopud.h linux-2.6.32.42/include/asm-generic/pgtable-nopud.h
54083 --- linux-2.6.32.42/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
54084 +++ linux-2.6.32.42/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
54085 @@ -1,10 +1,15 @@
54086 #ifndef _PGTABLE_NOPUD_H
54087 #define _PGTABLE_NOPUD_H
54088
54089 -#ifndef __ASSEMBLY__
54090 -
54091 #define __PAGETABLE_PUD_FOLDED
54092
54093 +#define PUD_SHIFT PGDIR_SHIFT
54094 +#define PTRS_PER_PUD 1
54095 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
54096 +#define PUD_MASK (~(PUD_SIZE-1))
54097 +
54098 +#ifndef __ASSEMBLY__
54099 +
54100 /*
54101 * Having the pud type consist of a pgd gets the size right, and allows
54102 * us to conceptually access the pgd entry that this pud is folded into
54103 @@ -12,11 +17,6 @@
54104 */
54105 typedef struct { pgd_t pgd; } pud_t;
54106
54107 -#define PUD_SHIFT PGDIR_SHIFT
54108 -#define PTRS_PER_PUD 1
54109 -#define PUD_SIZE (1UL << PUD_SHIFT)
54110 -#define PUD_MASK (~(PUD_SIZE-1))
54111 -
54112 /*
54113 * The "pgd_xxx()" functions here are trivial for a folded two-level
54114 * setup: the pud is never bad, and a pud always exists (as it's folded
54115 diff -urNp linux-2.6.32.42/include/asm-generic/vmlinux.lds.h linux-2.6.32.42/include/asm-generic/vmlinux.lds.h
54116 --- linux-2.6.32.42/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
54117 +++ linux-2.6.32.42/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
54118 @@ -199,6 +199,7 @@
54119 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
54120 VMLINUX_SYMBOL(__start_rodata) = .; \
54121 *(.rodata) *(.rodata.*) \
54122 + *(.data.read_only) \
54123 *(__vermagic) /* Kernel version magic */ \
54124 *(__markers_strings) /* Markers: strings */ \
54125 *(__tracepoints_strings)/* Tracepoints: strings */ \
54126 @@ -656,22 +657,24 @@
54127 * section in the linker script will go there too. @phdr should have
54128 * a leading colon.
54129 *
54130 - * Note that this macros defines __per_cpu_load as an absolute symbol.
54131 + * Note that this macros defines per_cpu_load as an absolute symbol.
54132 * If there is no need to put the percpu section at a predetermined
54133 * address, use PERCPU().
54134 */
54135 #define PERCPU_VADDR(vaddr, phdr) \
54136 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
54137 - .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
54138 + per_cpu_load = .; \
54139 + .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
54140 - LOAD_OFFSET) { \
54141 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
54142 VMLINUX_SYMBOL(__per_cpu_start) = .; \
54143 *(.data.percpu.first) \
54144 - *(.data.percpu.page_aligned) \
54145 *(.data.percpu) \
54146 + . = ALIGN(PAGE_SIZE); \
54147 + *(.data.percpu.page_aligned) \
54148 *(.data.percpu.shared_aligned) \
54149 VMLINUX_SYMBOL(__per_cpu_end) = .; \
54150 } phdr \
54151 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
54152 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
54153
54154 /**
54155 * PERCPU - define output section for percpu area, simple version
54156 diff -urNp linux-2.6.32.42/include/drm/drmP.h linux-2.6.32.42/include/drm/drmP.h
54157 --- linux-2.6.32.42/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
54158 +++ linux-2.6.32.42/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
54159 @@ -71,6 +71,7 @@
54160 #include <linux/workqueue.h>
54161 #include <linux/poll.h>
54162 #include <asm/pgalloc.h>
54163 +#include <asm/local.h>
54164 #include "drm.h"
54165
54166 #include <linux/idr.h>
54167 @@ -814,7 +815,7 @@ struct drm_driver {
54168 void (*vgaarb_irq)(struct drm_device *dev, bool state);
54169
54170 /* Driver private ops for this object */
54171 - struct vm_operations_struct *gem_vm_ops;
54172 + const struct vm_operations_struct *gem_vm_ops;
54173
54174 int major;
54175 int minor;
54176 @@ -917,7 +918,7 @@ struct drm_device {
54177
54178 /** \name Usage Counters */
54179 /*@{ */
54180 - int open_count; /**< Outstanding files open */
54181 + local_t open_count; /**< Outstanding files open */
54182 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
54183 atomic_t vma_count; /**< Outstanding vma areas open */
54184 int buf_use; /**< Buffers in use -- cannot alloc */
54185 @@ -928,7 +929,7 @@ struct drm_device {
54186 /*@{ */
54187 unsigned long counters;
54188 enum drm_stat_type types[15];
54189 - atomic_t counts[15];
54190 + atomic_unchecked_t counts[15];
54191 /*@} */
54192
54193 struct list_head filelist;
54194 @@ -1016,7 +1017,7 @@ struct drm_device {
54195 struct pci_controller *hose;
54196 #endif
54197 struct drm_sg_mem *sg; /**< Scatter gather memory */
54198 - unsigned int num_crtcs; /**< Number of CRTCs on this device */
54199 + unsigned int num_crtcs; /**< Number of CRTCs on this device */
54200 void *dev_private; /**< device private data */
54201 void *mm_private;
54202 struct address_space *dev_mapping;
54203 @@ -1042,11 +1043,11 @@ struct drm_device {
54204 spinlock_t object_name_lock;
54205 struct idr object_name_idr;
54206 atomic_t object_count;
54207 - atomic_t object_memory;
54208 + atomic_unchecked_t object_memory;
54209 atomic_t pin_count;
54210 - atomic_t pin_memory;
54211 + atomic_unchecked_t pin_memory;
54212 atomic_t gtt_count;
54213 - atomic_t gtt_memory;
54214 + atomic_unchecked_t gtt_memory;
54215 uint32_t gtt_total;
54216 uint32_t invalidate_domains; /* domains pending invalidation */
54217 uint32_t flush_domains; /* domains pending flush */
54218 diff -urNp linux-2.6.32.42/include/linux/a.out.h linux-2.6.32.42/include/linux/a.out.h
54219 --- linux-2.6.32.42/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
54220 +++ linux-2.6.32.42/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
54221 @@ -39,6 +39,14 @@ enum machine_type {
54222 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
54223 };
54224
54225 +/* Constants for the N_FLAGS field */
54226 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54227 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
54228 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
54229 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
54230 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54231 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54232 +
54233 #if !defined (N_MAGIC)
54234 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
54235 #endif
54236 diff -urNp linux-2.6.32.42/include/linux/atmdev.h linux-2.6.32.42/include/linux/atmdev.h
54237 --- linux-2.6.32.42/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
54238 +++ linux-2.6.32.42/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
54239 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
54240 #endif
54241
54242 struct k_atm_aal_stats {
54243 -#define __HANDLE_ITEM(i) atomic_t i
54244 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54245 __AAL_STAT_ITEMS
54246 #undef __HANDLE_ITEM
54247 };
54248 diff -urNp linux-2.6.32.42/include/linux/backlight.h linux-2.6.32.42/include/linux/backlight.h
54249 --- linux-2.6.32.42/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
54250 +++ linux-2.6.32.42/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
54251 @@ -36,18 +36,18 @@ struct backlight_device;
54252 struct fb_info;
54253
54254 struct backlight_ops {
54255 - unsigned int options;
54256 + const unsigned int options;
54257
54258 #define BL_CORE_SUSPENDRESUME (1 << 0)
54259
54260 /* Notify the backlight driver some property has changed */
54261 - int (*update_status)(struct backlight_device *);
54262 + int (* const update_status)(struct backlight_device *);
54263 /* Return the current backlight brightness (accounting for power,
54264 fb_blank etc.) */
54265 - int (*get_brightness)(struct backlight_device *);
54266 + int (* const get_brightness)(struct backlight_device *);
54267 /* Check if given framebuffer device is the one bound to this backlight;
54268 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
54269 - int (*check_fb)(struct fb_info *);
54270 + int (* const check_fb)(struct fb_info *);
54271 };
54272
54273 /* This structure defines all the properties of a backlight */
54274 @@ -86,7 +86,7 @@ struct backlight_device {
54275 registered this device has been unloaded, and if class_get_devdata()
54276 points to something in the body of that driver, it is also invalid. */
54277 struct mutex ops_lock;
54278 - struct backlight_ops *ops;
54279 + const struct backlight_ops *ops;
54280
54281 /* The framebuffer notifier block */
54282 struct notifier_block fb_notif;
54283 @@ -103,7 +103,7 @@ static inline void backlight_update_stat
54284 }
54285
54286 extern struct backlight_device *backlight_device_register(const char *name,
54287 - struct device *dev, void *devdata, struct backlight_ops *ops);
54288 + struct device *dev, void *devdata, const struct backlight_ops *ops);
54289 extern void backlight_device_unregister(struct backlight_device *bd);
54290 extern void backlight_force_update(struct backlight_device *bd,
54291 enum backlight_update_reason reason);
54292 diff -urNp linux-2.6.32.42/include/linux/binfmts.h linux-2.6.32.42/include/linux/binfmts.h
54293 --- linux-2.6.32.42/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
54294 +++ linux-2.6.32.42/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
54295 @@ -83,6 +83,7 @@ struct linux_binfmt {
54296 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
54297 int (*load_shlib)(struct file *);
54298 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
54299 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
54300 unsigned long min_coredump; /* minimal dump size */
54301 int hasvdso;
54302 };
54303 diff -urNp linux-2.6.32.42/include/linux/blkdev.h linux-2.6.32.42/include/linux/blkdev.h
54304 --- linux-2.6.32.42/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
54305 +++ linux-2.6.32.42/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
54306 @@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
54307 #endif /* CONFIG_BLK_DEV_INTEGRITY */
54308
54309 struct block_device_operations {
54310 - int (*open) (struct block_device *, fmode_t);
54311 - int (*release) (struct gendisk *, fmode_t);
54312 - int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54313 - int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54314 - int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54315 - int (*direct_access) (struct block_device *, sector_t,
54316 + int (* const open) (struct block_device *, fmode_t);
54317 + int (* const release) (struct gendisk *, fmode_t);
54318 + int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54319 + int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54320 + int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
54321 + int (* const direct_access) (struct block_device *, sector_t,
54322 void **, unsigned long *);
54323 - int (*media_changed) (struct gendisk *);
54324 - unsigned long long (*set_capacity) (struct gendisk *,
54325 + int (* const media_changed) (struct gendisk *);
54326 + unsigned long long (* const set_capacity) (struct gendisk *,
54327 unsigned long long);
54328 - int (*revalidate_disk) (struct gendisk *);
54329 - int (*getgeo)(struct block_device *, struct hd_geometry *);
54330 - struct module *owner;
54331 + int (* const revalidate_disk) (struct gendisk *);
54332 + int (*const getgeo)(struct block_device *, struct hd_geometry *);
54333 + struct module * const owner;
54334 };
54335
54336 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
54337 diff -urNp linux-2.6.32.42/include/linux/blktrace_api.h linux-2.6.32.42/include/linux/blktrace_api.h
54338 --- linux-2.6.32.42/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
54339 +++ linux-2.6.32.42/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
54340 @@ -160,7 +160,7 @@ struct blk_trace {
54341 struct dentry *dir;
54342 struct dentry *dropped_file;
54343 struct dentry *msg_file;
54344 - atomic_t dropped;
54345 + atomic_unchecked_t dropped;
54346 };
54347
54348 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
54349 diff -urNp linux-2.6.32.42/include/linux/byteorder/little_endian.h linux-2.6.32.42/include/linux/byteorder/little_endian.h
54350 --- linux-2.6.32.42/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
54351 +++ linux-2.6.32.42/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
54352 @@ -42,51 +42,51 @@
54353
54354 static inline __le64 __cpu_to_le64p(const __u64 *p)
54355 {
54356 - return (__force __le64)*p;
54357 + return (__force const __le64)*p;
54358 }
54359 static inline __u64 __le64_to_cpup(const __le64 *p)
54360 {
54361 - return (__force __u64)*p;
54362 + return (__force const __u64)*p;
54363 }
54364 static inline __le32 __cpu_to_le32p(const __u32 *p)
54365 {
54366 - return (__force __le32)*p;
54367 + return (__force const __le32)*p;
54368 }
54369 static inline __u32 __le32_to_cpup(const __le32 *p)
54370 {
54371 - return (__force __u32)*p;
54372 + return (__force const __u32)*p;
54373 }
54374 static inline __le16 __cpu_to_le16p(const __u16 *p)
54375 {
54376 - return (__force __le16)*p;
54377 + return (__force const __le16)*p;
54378 }
54379 static inline __u16 __le16_to_cpup(const __le16 *p)
54380 {
54381 - return (__force __u16)*p;
54382 + return (__force const __u16)*p;
54383 }
54384 static inline __be64 __cpu_to_be64p(const __u64 *p)
54385 {
54386 - return (__force __be64)__swab64p(p);
54387 + return (__force const __be64)__swab64p(p);
54388 }
54389 static inline __u64 __be64_to_cpup(const __be64 *p)
54390 {
54391 - return __swab64p((__u64 *)p);
54392 + return __swab64p((const __u64 *)p);
54393 }
54394 static inline __be32 __cpu_to_be32p(const __u32 *p)
54395 {
54396 - return (__force __be32)__swab32p(p);
54397 + return (__force const __be32)__swab32p(p);
54398 }
54399 static inline __u32 __be32_to_cpup(const __be32 *p)
54400 {
54401 - return __swab32p((__u32 *)p);
54402 + return __swab32p((const __u32 *)p);
54403 }
54404 static inline __be16 __cpu_to_be16p(const __u16 *p)
54405 {
54406 - return (__force __be16)__swab16p(p);
54407 + return (__force const __be16)__swab16p(p);
54408 }
54409 static inline __u16 __be16_to_cpup(const __be16 *p)
54410 {
54411 - return __swab16p((__u16 *)p);
54412 + return __swab16p((const __u16 *)p);
54413 }
54414 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
54415 #define __le64_to_cpus(x) do { (void)(x); } while (0)
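For orientation, the conversions these helpers perform reduce to a byte swap on a little-endian host (and to a plain load for the __cpu_to_le* direction). The swab32 below is the textbook shift-and-mask version, not the kernel's __swab32p.

#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffU) << 24) |
	       ((x & 0x0000ff00U) <<  8) |
	       ((x & 0x00ff0000U) >>  8) |
	       ((x & 0xff000000U) >> 24);
}

int main(void)
{
	uint32_t host = 0x12345678;

	printf("host 0x%08x -> byte-swapped 0x%08x\n", host, swab32(host));
	return 0;
}
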
54416 diff -urNp linux-2.6.32.42/include/linux/cache.h linux-2.6.32.42/include/linux/cache.h
54417 --- linux-2.6.32.42/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
54418 +++ linux-2.6.32.42/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
54419 @@ -16,6 +16,10 @@
54420 #define __read_mostly
54421 #endif
54422
54423 +#ifndef __read_only
54424 +#define __read_only __read_mostly
54425 +#endif
54426 +
54427 #ifndef ____cacheline_aligned
54428 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
54429 #endif
54430 diff -urNp linux-2.6.32.42/include/linux/capability.h linux-2.6.32.42/include/linux/capability.h
54431 --- linux-2.6.32.42/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
54432 +++ linux-2.6.32.42/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
54433 @@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
54434 (security_real_capable_noaudit((t), (cap)) == 0)
54435
54436 extern int capable(int cap);
54437 +int capable_nolog(int cap);
54438
54439 /* audit system wants to get cap info from files as well */
54440 struct dentry;
54441 diff -urNp linux-2.6.32.42/include/linux/compiler-gcc4.h linux-2.6.32.42/include/linux/compiler-gcc4.h
54442 --- linux-2.6.32.42/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
54443 +++ linux-2.6.32.42/include/linux/compiler-gcc4.h 2011-04-17 15:56:46.000000000 -0400
54444 @@ -36,4 +36,8 @@
54445 the kernel context */
54446 #define __cold __attribute__((__cold__))
54447
54448 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
54449 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
54450 +#define __bos0(ptr) __bos((ptr), 0)
54451 +#define __bos1(ptr) __bos((ptr), 1)
54452 #endif
54453 diff -urNp linux-2.6.32.42/include/linux/compiler.h linux-2.6.32.42/include/linux/compiler.h
54454 --- linux-2.6.32.42/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
54455 +++ linux-2.6.32.42/include/linux/compiler.h 2011-04-17 15:56:46.000000000 -0400
54456 @@ -256,6 +256,22 @@ void ftrace_likely_update(struct ftrace_
54457 #define __cold
54458 #endif
54459
54460 +#ifndef __alloc_size
54461 +#define __alloc_size
54462 +#endif
54463 +
54464 +#ifndef __bos
54465 +#define __bos
54466 +#endif
54467 +
54468 +#ifndef __bos0
54469 +#define __bos0
54470 +#endif
54471 +
54472 +#ifndef __bos1
54473 +#define __bos1
54474 +#endif
54475 +
54476 /* Simple shorthand for a section definition */
54477 #ifndef __section
54478 # define __section(S) __attribute__ ((__section__(#S)))
54479 @@ -278,6 +294,7 @@ void ftrace_likely_update(struct ftrace_
54480 * use is to mediate communication between process-level code and irq/NMI
54481 * handlers, all running on the same CPU.
54482 */
54483 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
54484 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
54485 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
54486
54487 #endif /* __LINUX_COMPILER_H */
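
The const added to ACCESS_ONCE() means the resulting lvalue can no longer be assigned through, so existing read sites keep working while write sites are forced to migrate to ACCESS_ONCE_RW(). A compile-only sketch of the effect, not part of the patch; flag, reader and writer are hypothetical:

#define ACCESS_ONCE(x)          (*(volatile const __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x)       (*(volatile __typeof__(x) *)&(x))

static int flag;

static int reader(void)
{
        return ACCESS_ONCE(flag);        /* reads are unchanged */
}

static void writer(void)
{
        /* ACCESS_ONCE(flag) = 1;           now a compile error: const lvalue */
        ACCESS_ONCE_RW(flag) = 1;        /* writes must be made explicit */
}
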
54488 diff -urNp linux-2.6.32.42/include/linux/dcache.h linux-2.6.32.42/include/linux/dcache.h
54489 --- linux-2.6.32.42/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
54490 +++ linux-2.6.32.42/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
54491 @@ -119,6 +119,8 @@ struct dentry {
54492 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
54493 };
54494
54495 +#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
54496 +
54497 /*
54498 * dentry->d_lock spinlock nesting subclasses:
54499 *
54500 diff -urNp linux-2.6.32.42/include/linux/decompress/mm.h linux-2.6.32.42/include/linux/decompress/mm.h
54501 --- linux-2.6.32.42/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
54502 +++ linux-2.6.32.42/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
54503 @@ -78,7 +78,7 @@ static void free(void *where)
54504 * warnings when not needed (indeed large_malloc / large_free are not
54505 * needed by inflate */
54506
54507 -#define malloc(a) kmalloc(a, GFP_KERNEL)
54508 +#define malloc(a) kmalloc((a), GFP_KERNEL)
54509 #define free(a) kfree(a)
54510
54511 #define large_malloc(a) vmalloc(a)
54512 diff -urNp linux-2.6.32.42/include/linux/dma-mapping.h linux-2.6.32.42/include/linux/dma-mapping.h
54513 --- linux-2.6.32.42/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
54514 +++ linux-2.6.32.42/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
54515 @@ -16,50 +16,50 @@ enum dma_data_direction {
54516 };
54517
54518 struct dma_map_ops {
54519 - void* (*alloc_coherent)(struct device *dev, size_t size,
54520 + void* (* const alloc_coherent)(struct device *dev, size_t size,
54521 dma_addr_t *dma_handle, gfp_t gfp);
54522 - void (*free_coherent)(struct device *dev, size_t size,
54523 + void (* const free_coherent)(struct device *dev, size_t size,
54524 void *vaddr, dma_addr_t dma_handle);
54525 - dma_addr_t (*map_page)(struct device *dev, struct page *page,
54526 + dma_addr_t (* const map_page)(struct device *dev, struct page *page,
54527 unsigned long offset, size_t size,
54528 enum dma_data_direction dir,
54529 struct dma_attrs *attrs);
54530 - void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
54531 + void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
54532 size_t size, enum dma_data_direction dir,
54533 struct dma_attrs *attrs);
54534 - int (*map_sg)(struct device *dev, struct scatterlist *sg,
54535 + int (* const map_sg)(struct device *dev, struct scatterlist *sg,
54536 int nents, enum dma_data_direction dir,
54537 struct dma_attrs *attrs);
54538 - void (*unmap_sg)(struct device *dev,
54539 + void (* const unmap_sg)(struct device *dev,
54540 struct scatterlist *sg, int nents,
54541 enum dma_data_direction dir,
54542 struct dma_attrs *attrs);
54543 - void (*sync_single_for_cpu)(struct device *dev,
54544 + void (* const sync_single_for_cpu)(struct device *dev,
54545 dma_addr_t dma_handle, size_t size,
54546 enum dma_data_direction dir);
54547 - void (*sync_single_for_device)(struct device *dev,
54548 + void (* const sync_single_for_device)(struct device *dev,
54549 dma_addr_t dma_handle, size_t size,
54550 enum dma_data_direction dir);
54551 - void (*sync_single_range_for_cpu)(struct device *dev,
54552 + void (* const sync_single_range_for_cpu)(struct device *dev,
54553 dma_addr_t dma_handle,
54554 unsigned long offset,
54555 size_t size,
54556 enum dma_data_direction dir);
54557 - void (*sync_single_range_for_device)(struct device *dev,
54558 + void (* const sync_single_range_for_device)(struct device *dev,
54559 dma_addr_t dma_handle,
54560 unsigned long offset,
54561 size_t size,
54562 enum dma_data_direction dir);
54563 - void (*sync_sg_for_cpu)(struct device *dev,
54564 + void (* const sync_sg_for_cpu)(struct device *dev,
54565 struct scatterlist *sg, int nents,
54566 enum dma_data_direction dir);
54567 - void (*sync_sg_for_device)(struct device *dev,
54568 + void (* const sync_sg_for_device)(struct device *dev,
54569 struct scatterlist *sg, int nents,
54570 enum dma_data_direction dir);
54571 - int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
54572 - int (*dma_supported)(struct device *dev, u64 mask);
54573 + int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
54574 + int (* const dma_supported)(struct device *dev, u64 mask);
54575 int (*set_dma_mask)(struct device *dev, u64 mask);
54576 - int is_phys;
54577 + const int is_phys;
54578 };
54579
54580 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
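
Constifying the function-pointer members of dma_map_ops (and, further below, of the VFS operation tables) means an ops structure can still be initialized statically but its handlers can no longer be retargeted at run time, which is the attack surface this part of the patch removes. A small compile-only sketch of the same idiom, not part of the patch; demo_ops and its handlers are hypothetical:

struct demo_ops {
        int  (* const probe)(int id);
        void (* const remove)(int id);
};

static int  demo_probe(int id)  { return id; }
static void demo_remove(int id) { (void)id; }

static const struct demo_ops ops = {
        .probe  = demo_probe,
        .remove = demo_remove,
};

static int use_ops(void)
{
        /* ops.probe = demo_probe;           would not compile: const member */
        return ops.probe(1);
}
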
54581 diff -urNp linux-2.6.32.42/include/linux/dst.h linux-2.6.32.42/include/linux/dst.h
54582 --- linux-2.6.32.42/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
54583 +++ linux-2.6.32.42/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
54584 @@ -380,7 +380,7 @@ struct dst_node
54585 struct thread_pool *pool;
54586
54587 /* Transaction IDs live here */
54588 - atomic_long_t gen;
54589 + atomic_long_unchecked_t gen;
54590
54591 /*
54592 * How frequently and how many times transaction
54593 diff -urNp linux-2.6.32.42/include/linux/elf.h linux-2.6.32.42/include/linux/elf.h
54594 --- linux-2.6.32.42/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
54595 +++ linux-2.6.32.42/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
54596 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
54597 #define PT_GNU_EH_FRAME 0x6474e550
54598
54599 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
54600 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
54601 +
54602 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
54603 +
54604 +/* Constants for the e_flags field */
54605 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54606 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
54607 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
54608 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
54609 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54610 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54611
54612 /* These constants define the different elf file types */
54613 #define ET_NONE 0
54614 @@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
54615 #define DT_DEBUG 21
54616 #define DT_TEXTREL 22
54617 #define DT_JMPREL 23
54618 +#define DT_FLAGS 30
54619 + #define DF_TEXTREL 0x00000004
54620 #define DT_ENCODING 32
54621 #define OLD_DT_LOOS 0x60000000
54622 #define DT_LOOS 0x6000000d
54623 @@ -230,6 +243,19 @@ typedef struct elf64_hdr {
54624 #define PF_W 0x2
54625 #define PF_X 0x1
54626
54627 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
54628 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
54629 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
54630 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
54631 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
54632 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
54633 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
54634 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
54635 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
54636 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
54637 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
54638 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
54639 +
54640 typedef struct elf32_phdr{
54641 Elf32_Word p_type;
54642 Elf32_Off p_offset;
54643 @@ -322,6 +348,8 @@ typedef struct elf64_shdr {
54644 #define EI_OSABI 7
54645 #define EI_PAD 8
54646
54647 +#define EI_PAX 14
54648 +
54649 #define ELFMAG0 0x7f /* EI_MAG */
54650 #define ELFMAG1 'E'
54651 #define ELFMAG2 'L'
54652 @@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
54653 #define elf_phdr elf32_phdr
54654 #define elf_note elf32_note
54655 #define elf_addr_t Elf32_Off
54656 +#define elf_dyn Elf32_Dyn
54657
54658 #else
54659
54660 @@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
54661 #define elf_phdr elf64_phdr
54662 #define elf_note elf64_note
54663 #define elf_addr_t Elf64_Off
54664 +#define elf_dyn Elf64_Dyn
54665
54666 #endif
54667
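
PT_PAX_FLAGS is an OS-specific program header that carries the per-binary PaX enable/disable bits defined above; the kernel consults it at exec time when CONFIG_PAX_PT_PAX_FLAGS is set. A userspace sketch, not part of the patch, that decodes a few of those bits from an already-mapped 64-bit ELF image; report_pax_flags is a hypothetical helper:

#include <stdio.h>
#include <stdint.h>
#include <elf.h>

#ifndef PT_PAX_FLAGS
#define PT_PAX_FLAGS    (PT_LOOS + 0x5041580)
#endif
#define PF_PAGEEXEC     (1U << 4)        /* mirrors the defines in this hunk */
#define PF_MPROTECT     (1U << 8)
#define PF_RANDMMAP     (1U << 14)

static void report_pax_flags(const Elf64_Ehdr *ehdr)
{
        const Elf64_Phdr *phdr =
                (const Elf64_Phdr *)((const char *)ehdr + ehdr->e_phoff);
        uint16_t i;

        for (i = 0; i < ehdr->e_phnum; i++) {
                if (phdr[i].p_type != PT_PAX_FLAGS)
                        continue;
                printf("PAGEEXEC:%c MPROTECT:%c RANDMMAP:%c\n",
                       phdr[i].p_flags & PF_PAGEEXEC ? '+' : '-',
                       phdr[i].p_flags & PF_MPROTECT ? '+' : '-',
                       phdr[i].p_flags & PF_RANDMMAP ? '+' : '-');
        }
}
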
54668 diff -urNp linux-2.6.32.42/include/linux/fscache-cache.h linux-2.6.32.42/include/linux/fscache-cache.h
54669 --- linux-2.6.32.42/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
54670 +++ linux-2.6.32.42/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
54671 @@ -116,7 +116,7 @@ struct fscache_operation {
54672 #endif
54673 };
54674
54675 -extern atomic_t fscache_op_debug_id;
54676 +extern atomic_unchecked_t fscache_op_debug_id;
54677 extern const struct slow_work_ops fscache_op_slow_work_ops;
54678
54679 extern void fscache_enqueue_operation(struct fscache_operation *);
54680 @@ -134,7 +134,7 @@ static inline void fscache_operation_ini
54681 fscache_operation_release_t release)
54682 {
54683 atomic_set(&op->usage, 1);
54684 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
54685 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
54686 op->release = release;
54687 INIT_LIST_HEAD(&op->pend_link);
54688 fscache_set_op_state(op, "Init");
54689 diff -urNp linux-2.6.32.42/include/linux/fs.h linux-2.6.32.42/include/linux/fs.h
54690 --- linux-2.6.32.42/include/linux/fs.h 2011-03-27 14:31:47.000000000 -0400
54691 +++ linux-2.6.32.42/include/linux/fs.h 2011-04-17 15:56:46.000000000 -0400
54692 @@ -90,6 +90,11 @@ struct inodes_stat_t {
54693 /* Expect random access pattern */
54694 #define FMODE_RANDOM ((__force fmode_t)4096)
54695
54696 +/* Hack for grsec so as not to require read permission simply to execute
54697 + * a binary
54698 + */
54699 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
54700 +
54701 /*
54702 * The below are the various read and write types that we support. Some of
54703 * them include behavioral modifiers that send information down to the
54704 @@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
54705 unsigned long, unsigned long);
54706
54707 struct address_space_operations {
54708 - int (*writepage)(struct page *page, struct writeback_control *wbc);
54709 - int (*readpage)(struct file *, struct page *);
54710 - void (*sync_page)(struct page *);
54711 + int (* const writepage)(struct page *page, struct writeback_control *wbc);
54712 + int (* const readpage)(struct file *, struct page *);
54713 + void (* const sync_page)(struct page *);
54714
54715 /* Write back some dirty pages from this mapping. */
54716 - int (*writepages)(struct address_space *, struct writeback_control *);
54717 + int (* const writepages)(struct address_space *, struct writeback_control *);
54718
54719 /* Set a page dirty. Return true if this dirtied it */
54720 - int (*set_page_dirty)(struct page *page);
54721 + int (* const set_page_dirty)(struct page *page);
54722
54723 - int (*readpages)(struct file *filp, struct address_space *mapping,
54724 + int (* const readpages)(struct file *filp, struct address_space *mapping,
54725 struct list_head *pages, unsigned nr_pages);
54726
54727 - int (*write_begin)(struct file *, struct address_space *mapping,
54728 + int (* const write_begin)(struct file *, struct address_space *mapping,
54729 loff_t pos, unsigned len, unsigned flags,
54730 struct page **pagep, void **fsdata);
54731 - int (*write_end)(struct file *, struct address_space *mapping,
54732 + int (* const write_end)(struct file *, struct address_space *mapping,
54733 loff_t pos, unsigned len, unsigned copied,
54734 struct page *page, void *fsdata);
54735
54736 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
54737 - sector_t (*bmap)(struct address_space *, sector_t);
54738 - void (*invalidatepage) (struct page *, unsigned long);
54739 - int (*releasepage) (struct page *, gfp_t);
54740 - ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
54741 + sector_t (* const bmap)(struct address_space *, sector_t);
54742 + void (* const invalidatepage) (struct page *, unsigned long);
54743 + int (* const releasepage) (struct page *, gfp_t);
54744 + ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
54745 loff_t offset, unsigned long nr_segs);
54746 - int (*get_xip_mem)(struct address_space *, pgoff_t, int,
54747 + int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
54748 void **, unsigned long *);
54749 /* migrate the contents of a page to the specified target */
54750 - int (*migratepage) (struct address_space *,
54751 + int (* const migratepage) (struct address_space *,
54752 struct page *, struct page *);
54753 - int (*launder_page) (struct page *);
54754 - int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
54755 + int (* const launder_page) (struct page *);
54756 + int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
54757 unsigned long);
54758 - int (*error_remove_page)(struct address_space *, struct page *);
54759 + int (* const error_remove_page)(struct address_space *, struct page *);
54760 };
54761
54762 /*
54763 @@ -1030,19 +1035,19 @@ static inline int file_check_writeable(s
54764 typedef struct files_struct *fl_owner_t;
54765
54766 struct file_lock_operations {
54767 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
54768 - void (*fl_release_private)(struct file_lock *);
54769 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
54770 + void (* const fl_release_private)(struct file_lock *);
54771 };
54772
54773 struct lock_manager_operations {
54774 - int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
54775 - void (*fl_notify)(struct file_lock *); /* unblock callback */
54776 - int (*fl_grant)(struct file_lock *, struct file_lock *, int);
54777 - void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
54778 - void (*fl_release_private)(struct file_lock *);
54779 - void (*fl_break)(struct file_lock *);
54780 - int (*fl_mylease)(struct file_lock *, struct file_lock *);
54781 - int (*fl_change)(struct file_lock **, int);
54782 + int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
54783 + void (* const fl_notify)(struct file_lock *); /* unblock callback */
54784 + int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
54785 + void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
54786 + void (* const fl_release_private)(struct file_lock *);
54787 + void (* const fl_break)(struct file_lock *);
54788 + int (* const fl_mylease)(struct file_lock *, struct file_lock *);
54789 + int (* const fl_change)(struct file_lock **, int);
54790 };
54791
54792 struct lock_manager {
54793 @@ -1441,7 +1446,7 @@ struct fiemap_extent_info {
54794 unsigned int fi_flags; /* Flags as passed from user */
54795 unsigned int fi_extents_mapped; /* Number of mapped extents */
54796 unsigned int fi_extents_max; /* Size of fiemap_extent array */
54797 - struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
54798 + struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
54799 * array */
54800 };
54801 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
54802 @@ -1558,30 +1563,30 @@ extern ssize_t vfs_writev(struct file *,
54803 unsigned long, loff_t *);
54804
54805 struct super_operations {
54806 - struct inode *(*alloc_inode)(struct super_block *sb);
54807 - void (*destroy_inode)(struct inode *);
54808 + struct inode *(* const alloc_inode)(struct super_block *sb);
54809 + void (* const destroy_inode)(struct inode *);
54810
54811 - void (*dirty_inode) (struct inode *);
54812 - int (*write_inode) (struct inode *, int);
54813 - void (*drop_inode) (struct inode *);
54814 - void (*delete_inode) (struct inode *);
54815 - void (*put_super) (struct super_block *);
54816 - void (*write_super) (struct super_block *);
54817 - int (*sync_fs)(struct super_block *sb, int wait);
54818 - int (*freeze_fs) (struct super_block *);
54819 - int (*unfreeze_fs) (struct super_block *);
54820 - int (*statfs) (struct dentry *, struct kstatfs *);
54821 - int (*remount_fs) (struct super_block *, int *, char *);
54822 - void (*clear_inode) (struct inode *);
54823 - void (*umount_begin) (struct super_block *);
54824 + void (* const dirty_inode) (struct inode *);
54825 + int (* const write_inode) (struct inode *, int);
54826 + void (* const drop_inode) (struct inode *);
54827 + void (* const delete_inode) (struct inode *);
54828 + void (* const put_super) (struct super_block *);
54829 + void (* const write_super) (struct super_block *);
54830 + int (* const sync_fs)(struct super_block *sb, int wait);
54831 + int (* const freeze_fs) (struct super_block *);
54832 + int (* const unfreeze_fs) (struct super_block *);
54833 + int (* const statfs) (struct dentry *, struct kstatfs *);
54834 + int (* const remount_fs) (struct super_block *, int *, char *);
54835 + void (* const clear_inode) (struct inode *);
54836 + void (* const umount_begin) (struct super_block *);
54837
54838 - int (*show_options)(struct seq_file *, struct vfsmount *);
54839 - int (*show_stats)(struct seq_file *, struct vfsmount *);
54840 + int (* const show_options)(struct seq_file *, struct vfsmount *);
54841 + int (* const show_stats)(struct seq_file *, struct vfsmount *);
54842 #ifdef CONFIG_QUOTA
54843 - ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
54844 - ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
54845 + ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
54846 + ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
54847 #endif
54848 - int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
54849 + int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
54850 };
54851
54852 /*
54853 diff -urNp linux-2.6.32.42/include/linux/fs_struct.h linux-2.6.32.42/include/linux/fs_struct.h
54854 --- linux-2.6.32.42/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
54855 +++ linux-2.6.32.42/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
54856 @@ -4,7 +4,7 @@
54857 #include <linux/path.h>
54858
54859 struct fs_struct {
54860 - int users;
54861 + atomic_t users;
54862 rwlock_t lock;
54863 int umask;
54864 int in_exec;
54865 diff -urNp linux-2.6.32.42/include/linux/ftrace_event.h linux-2.6.32.42/include/linux/ftrace_event.h
54866 --- linux-2.6.32.42/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
54867 +++ linux-2.6.32.42/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
54868 @@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
54869 int filter_type);
54870 extern int trace_define_common_fields(struct ftrace_event_call *call);
54871
54872 -#define is_signed_type(type) (((type)(-1)) < 0)
54873 +#define is_signed_type(type) (((type)(-1)) < (type)1)
54874
54875 int trace_set_clr_event(const char *system, const char *event, int set);
54876
54877 diff -urNp linux-2.6.32.42/include/linux/genhd.h linux-2.6.32.42/include/linux/genhd.h
54878 --- linux-2.6.32.42/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
54879 +++ linux-2.6.32.42/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
54880 @@ -161,7 +161,7 @@ struct gendisk {
54881
54882 struct timer_rand_state *random;
54883
54884 - atomic_t sync_io; /* RAID */
54885 + atomic_unchecked_t sync_io; /* RAID */
54886 struct work_struct async_notify;
54887 #ifdef CONFIG_BLK_DEV_INTEGRITY
54888 struct blk_integrity *integrity;
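
sync_io here, like fscache_op_debug_id and dst_node.gen above, is switched to atomic_unchecked_t because it is a counter that is allowed to wrap; under PAX_REFCOUNT the plain atomic_t operations are overflow-checked so that reference-count overflows can be trapped. A userspace model of that split, not part of the patch and not the kernel implementation (which performs the check inside the atomic primitives themselves):

#include <limits.h>

typedef struct { volatile int counter; } atomic_model_t;

/* checked flavour: a refusing increment stands in for the
 * PAX_REFCOUNT overflow trap */
static int atomic_inc_checked_model(atomic_model_t *v)
{
        if (v->counter == INT_MAX)
                return -1;                /* would be reported as a refcount overflow */
        return ++v->counter;
}

/* unchecked flavour: wrapping is acceptable for pure statistics */
static int atomic_inc_unchecked_model(atomic_model_t *v)
{
        return ++v->counter;
}
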
54889 diff -urNp linux-2.6.32.42/include/linux/gracl.h linux-2.6.32.42/include/linux/gracl.h
54890 --- linux-2.6.32.42/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
54891 +++ linux-2.6.32.42/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
54892 @@ -0,0 +1,317 @@
54893 +#ifndef GR_ACL_H
54894 +#define GR_ACL_H
54895 +
54896 +#include <linux/grdefs.h>
54897 +#include <linux/resource.h>
54898 +#include <linux/capability.h>
54899 +#include <linux/dcache.h>
54900 +#include <asm/resource.h>
54901 +
54902 +/* Major status information */
54903 +
54904 +#define GR_VERSION "grsecurity 2.2.2"
54905 +#define GRSECURITY_VERSION 0x2202
54906 +
54907 +enum {
54908 + GR_SHUTDOWN = 0,
54909 + GR_ENABLE = 1,
54910 + GR_SPROLE = 2,
54911 + GR_RELOAD = 3,
54912 + GR_SEGVMOD = 4,
54913 + GR_STATUS = 5,
54914 + GR_UNSPROLE = 6,
54915 + GR_PASSSET = 7,
54916 + GR_SPROLEPAM = 8,
54917 +};
54918 +
54919 +/* Password setup definitions
54920 + * kernel/grhash.c */
54921 +enum {
54922 + GR_PW_LEN = 128,
54923 + GR_SALT_LEN = 16,
54924 + GR_SHA_LEN = 32,
54925 +};
54926 +
54927 +enum {
54928 + GR_SPROLE_LEN = 64,
54929 +};
54930 +
54931 +enum {
54932 + GR_NO_GLOB = 0,
54933 + GR_REG_GLOB,
54934 + GR_CREATE_GLOB
54935 +};
54936 +
54937 +#define GR_NLIMITS 32
54938 +
54939 +/* Begin Data Structures */
54940 +
54941 +struct sprole_pw {
54942 + unsigned char *rolename;
54943 + unsigned char salt[GR_SALT_LEN];
54944 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
54945 +};
54946 +
54947 +struct name_entry {
54948 + __u32 key;
54949 + ino_t inode;
54950 + dev_t device;
54951 + char *name;
54952 + __u16 len;
54953 + __u8 deleted;
54954 + struct name_entry *prev;
54955 + struct name_entry *next;
54956 +};
54957 +
54958 +struct inodev_entry {
54959 + struct name_entry *nentry;
54960 + struct inodev_entry *prev;
54961 + struct inodev_entry *next;
54962 +};
54963 +
54964 +struct acl_role_db {
54965 + struct acl_role_label **r_hash;
54966 + __u32 r_size;
54967 +};
54968 +
54969 +struct inodev_db {
54970 + struct inodev_entry **i_hash;
54971 + __u32 i_size;
54972 +};
54973 +
54974 +struct name_db {
54975 + struct name_entry **n_hash;
54976 + __u32 n_size;
54977 +};
54978 +
54979 +struct crash_uid {
54980 + uid_t uid;
54981 + unsigned long expires;
54982 +};
54983 +
54984 +struct gr_hash_struct {
54985 + void **table;
54986 + void **nametable;
54987 + void *first;
54988 + __u32 table_size;
54989 + __u32 used_size;
54990 + int type;
54991 +};
54992 +
54993 +/* Userspace Grsecurity ACL data structures */
54994 +
54995 +struct acl_subject_label {
54996 + char *filename;
54997 + ino_t inode;
54998 + dev_t device;
54999 + __u32 mode;
55000 + kernel_cap_t cap_mask;
55001 + kernel_cap_t cap_lower;
55002 + kernel_cap_t cap_invert_audit;
55003 +
55004 + struct rlimit res[GR_NLIMITS];
55005 + __u32 resmask;
55006 +
55007 + __u8 user_trans_type;
55008 + __u8 group_trans_type;
55009 + uid_t *user_transitions;
55010 + gid_t *group_transitions;
55011 + __u16 user_trans_num;
55012 + __u16 group_trans_num;
55013 +
55014 + __u32 sock_families[2];
55015 + __u32 ip_proto[8];
55016 + __u32 ip_type;
55017 + struct acl_ip_label **ips;
55018 + __u32 ip_num;
55019 + __u32 inaddr_any_override;
55020 +
55021 + __u32 crashes;
55022 + unsigned long expires;
55023 +
55024 + struct acl_subject_label *parent_subject;
55025 + struct gr_hash_struct *hash;
55026 + struct acl_subject_label *prev;
55027 + struct acl_subject_label *next;
55028 +
55029 + struct acl_object_label **obj_hash;
55030 + __u32 obj_hash_size;
55031 + __u16 pax_flags;
55032 +};
55033 +
55034 +struct role_allowed_ip {
55035 + __u32 addr;
55036 + __u32 netmask;
55037 +
55038 + struct role_allowed_ip *prev;
55039 + struct role_allowed_ip *next;
55040 +};
55041 +
55042 +struct role_transition {
55043 + char *rolename;
55044 +
55045 + struct role_transition *prev;
55046 + struct role_transition *next;
55047 +};
55048 +
55049 +struct acl_role_label {
55050 + char *rolename;
55051 + uid_t uidgid;
55052 + __u16 roletype;
55053 +
55054 + __u16 auth_attempts;
55055 + unsigned long expires;
55056 +
55057 + struct acl_subject_label *root_label;
55058 + struct gr_hash_struct *hash;
55059 +
55060 + struct acl_role_label *prev;
55061 + struct acl_role_label *next;
55062 +
55063 + struct role_transition *transitions;
55064 + struct role_allowed_ip *allowed_ips;
55065 + uid_t *domain_children;
55066 + __u16 domain_child_num;
55067 +
55068 + struct acl_subject_label **subj_hash;
55069 + __u32 subj_hash_size;
55070 +};
55071 +
55072 +struct user_acl_role_db {
55073 + struct acl_role_label **r_table;
55074 + __u32 num_pointers; /* Number of allocations to track */
55075 + __u32 num_roles; /* Number of roles */
55076 + __u32 num_domain_children; /* Number of domain children */
55077 + __u32 num_subjects; /* Number of subjects */
55078 + __u32 num_objects; /* Number of objects */
55079 +};
55080 +
55081 +struct acl_object_label {
55082 + char *filename;
55083 + ino_t inode;
55084 + dev_t device;
55085 + __u32 mode;
55086 +
55087 + struct acl_subject_label *nested;
55088 + struct acl_object_label *globbed;
55089 +
55090 + /* next two structures not used */
55091 +
55092 + struct acl_object_label *prev;
55093 + struct acl_object_label *next;
55094 +};
55095 +
55096 +struct acl_ip_label {
55097 + char *iface;
55098 + __u32 addr;
55099 + __u32 netmask;
55100 + __u16 low, high;
55101 + __u8 mode;
55102 + __u32 type;
55103 + __u32 proto[8];
55104 +
55105 + /* next two structures not used */
55106 +
55107 + struct acl_ip_label *prev;
55108 + struct acl_ip_label *next;
55109 +};
55110 +
55111 +struct gr_arg {
55112 + struct user_acl_role_db role_db;
55113 + unsigned char pw[GR_PW_LEN];
55114 + unsigned char salt[GR_SALT_LEN];
55115 + unsigned char sum[GR_SHA_LEN];
55116 + unsigned char sp_role[GR_SPROLE_LEN];
55117 + struct sprole_pw *sprole_pws;
55118 + dev_t segv_device;
55119 + ino_t segv_inode;
55120 + uid_t segv_uid;
55121 + __u16 num_sprole_pws;
55122 + __u16 mode;
55123 +};
55124 +
55125 +struct gr_arg_wrapper {
55126 + struct gr_arg *arg;
55127 + __u32 version;
55128 + __u32 size;
55129 +};
55130 +
55131 +struct subject_map {
55132 + struct acl_subject_label *user;
55133 + struct acl_subject_label *kernel;
55134 + struct subject_map *prev;
55135 + struct subject_map *next;
55136 +};
55137 +
55138 +struct acl_subj_map_db {
55139 + struct subject_map **s_hash;
55140 + __u32 s_size;
55141 +};
55142 +
55143 +/* End Data Structures Section */
55144 +
55145 +/* Hash functions generated by empirical testing by Brad Spengler
55146 + Makes good use of the low bits of the inode. Generally 0-1 times
55147 + in loop for successful match. 0-3 for unsuccessful match.
55148 + Shift/add algorithm with modulus of table size and an XOR*/
55149 +
55150 +static __inline__ unsigned int
55151 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
55152 +{
55153 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
55154 +}
55155 +
55156 + static __inline__ unsigned int
55157 +shash(const struct acl_subject_label *userp, const unsigned int sz)
55158 +{
55159 + return ((const unsigned long)userp % sz);
55160 +}
55161 +
55162 +static __inline__ unsigned int
55163 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
55164 +{
55165 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
55166 +}
55167 +
55168 +static __inline__ unsigned int
55169 +nhash(const char *name, const __u16 len, const unsigned int sz)
55170 +{
55171 + return full_name_hash((const unsigned char *)name, len) % sz;
55172 +}
55173 +
55174 +#define FOR_EACH_ROLE_START(role) \
55175 + role = role_list; \
55176 + while (role) {
55177 +
55178 +#define FOR_EACH_ROLE_END(role) \
55179 + role = role->prev; \
55180 + }
55181 +
55182 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
55183 + subj = NULL; \
55184 + iter = 0; \
55185 + while (iter < role->subj_hash_size) { \
55186 + if (subj == NULL) \
55187 + subj = role->subj_hash[iter]; \
55188 + if (subj == NULL) { \
55189 + iter++; \
55190 + continue; \
55191 + }
55192 +
55193 +#define FOR_EACH_SUBJECT_END(subj,iter) \
55194 + subj = subj->next; \
55195 + if (subj == NULL) \
55196 + iter++; \
55197 + }
55198 +
55199 +
55200 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
55201 + subj = role->hash->first; \
55202 + while (subj != NULL) {
55203 +
55204 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
55205 + subj = subj->next; \
55206 + }
55207 +
55208 +#endif
55209 +
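
The rhash/shash/fhash/nhash helpers above pick a bucket in the RBAC hash tables (roles by uid/gid, subjects by userland pointer, objects by inode and device, names by string). A userspace sketch of how the inode/device variant drives a chained lookup, not part of the patch; the *_model types, struct obj and demo_lookup are hypothetical:

#include <stddef.h>

typedef unsigned long ino_model_t;
typedef unsigned int  dev_model_t;

/* same shift/add/xor-then-modulus scheme as fhash() above */
static unsigned int fhash_model(ino_model_t ino, dev_model_t dev, unsigned int sz)
{
        return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

struct obj {
        ino_model_t ino;
        dev_model_t dev;
        struct obj *next;
};

static struct obj *demo_lookup(struct obj **table, unsigned int sz,
                               ino_model_t ino, dev_model_t dev)
{
        struct obj *o;

        for (o = table[fhash_model(ino, dev, sz)]; o != NULL; o = o->next)
                if (o->ino == ino && o->dev == dev)
                        return o;
        return NULL;
}
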
55210 diff -urNp linux-2.6.32.42/include/linux/gralloc.h linux-2.6.32.42/include/linux/gralloc.h
55211 --- linux-2.6.32.42/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
55212 +++ linux-2.6.32.42/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
55213 @@ -0,0 +1,9 @@
55214 +#ifndef __GRALLOC_H
55215 +#define __GRALLOC_H
55216 +
55217 +void acl_free_all(void);
55218 +int acl_alloc_stack_init(unsigned long size);
55219 +void *acl_alloc(unsigned long len);
55220 +void *acl_alloc_num(unsigned long num, unsigned long len);
55221 +
55222 +#endif
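
gralloc.h declares a simple tracking allocator used while a policy is loaded: the tracking stack is sized once, individual allocations are carved out with acl_alloc()/acl_alloc_num(), and acl_free_all() releases the lot when the policy is replaced or torn down. A usage sketch under those assumptions, not part of the patch; load_policy_example is hypothetical, and a nonzero return from acl_alloc_stack_init() is assumed to mean success:

/* prototypes as declared in gralloc.h above */
void acl_free_all(void);
int acl_alloc_stack_init(unsigned long size);
void *acl_alloc(unsigned long len);

static int load_policy_example(unsigned long nr_allocations)
{
        void *subj;

        if (!acl_alloc_stack_init(nr_allocations))        /* assumption: 0 == failure */
                return -1;

        subj = acl_alloc(256);                /* one tracked allocation */
        if (subj == NULL) {
                acl_free_all();
                return -1;
        }

        /* ... build the in-kernel policy from the userland tables ... */

        acl_free_all();                        /* releases subj and everything else */
        return 0;
}
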
55223 diff -urNp linux-2.6.32.42/include/linux/grdefs.h linux-2.6.32.42/include/linux/grdefs.h
55224 --- linux-2.6.32.42/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
55225 +++ linux-2.6.32.42/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
55226 @@ -0,0 +1,140 @@
55227 +#ifndef GRDEFS_H
55228 +#define GRDEFS_H
55229 +
55230 +/* Begin grsecurity status declarations */
55231 +
55232 +enum {
55233 + GR_READY = 0x01,
55234 + GR_STATUS_INIT = 0x00 // disabled state
55235 +};
55236 +
55237 +/* Begin ACL declarations */
55238 +
55239 +/* Role flags */
55240 +
55241 +enum {
55242 + GR_ROLE_USER = 0x0001,
55243 + GR_ROLE_GROUP = 0x0002,
55244 + GR_ROLE_DEFAULT = 0x0004,
55245 + GR_ROLE_SPECIAL = 0x0008,
55246 + GR_ROLE_AUTH = 0x0010,
55247 + GR_ROLE_NOPW = 0x0020,
55248 + GR_ROLE_GOD = 0x0040,
55249 + GR_ROLE_LEARN = 0x0080,
55250 + GR_ROLE_TPE = 0x0100,
55251 + GR_ROLE_DOMAIN = 0x0200,
55252 + GR_ROLE_PAM = 0x0400,
55253 + GR_ROLE_PERSIST = 0x800
55254 +};
55255 +
55256 +/* ACL Subject and Object mode flags */
55257 +enum {
55258 + GR_DELETED = 0x80000000
55259 +};
55260 +
55261 +/* ACL Object-only mode flags */
55262 +enum {
55263 + GR_READ = 0x00000001,
55264 + GR_APPEND = 0x00000002,
55265 + GR_WRITE = 0x00000004,
55266 + GR_EXEC = 0x00000008,
55267 + GR_FIND = 0x00000010,
55268 + GR_INHERIT = 0x00000020,
55269 + GR_SETID = 0x00000040,
55270 + GR_CREATE = 0x00000080,
55271 + GR_DELETE = 0x00000100,
55272 + GR_LINK = 0x00000200,
55273 + GR_AUDIT_READ = 0x00000400,
55274 + GR_AUDIT_APPEND = 0x00000800,
55275 + GR_AUDIT_WRITE = 0x00001000,
55276 + GR_AUDIT_EXEC = 0x00002000,
55277 + GR_AUDIT_FIND = 0x00004000,
55278 + GR_AUDIT_INHERIT= 0x00008000,
55279 + GR_AUDIT_SETID = 0x00010000,
55280 + GR_AUDIT_CREATE = 0x00020000,
55281 + GR_AUDIT_DELETE = 0x00040000,
55282 + GR_AUDIT_LINK = 0x00080000,
55283 + GR_PTRACERD = 0x00100000,
55284 + GR_NOPTRACE = 0x00200000,
55285 + GR_SUPPRESS = 0x00400000,
55286 + GR_NOLEARN = 0x00800000,
55287 + GR_INIT_TRANSFER= 0x01000000
55288 +};
55289 +
55290 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
55291 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
55292 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
55293 +
55294 +/* ACL subject-only mode flags */
55295 +enum {
55296 + GR_KILL = 0x00000001,
55297 + GR_VIEW = 0x00000002,
55298 + GR_PROTECTED = 0x00000004,
55299 + GR_LEARN = 0x00000008,
55300 + GR_OVERRIDE = 0x00000010,
55301 + /* just a placeholder, this mode is only used in userspace */
55302 + GR_DUMMY = 0x00000020,
55303 + GR_PROTSHM = 0x00000040,
55304 + GR_KILLPROC = 0x00000080,
55305 + GR_KILLIPPROC = 0x00000100,
55306 + /* just a placeholder, this mode is only used in userspace */
55307 + GR_NOTROJAN = 0x00000200,
55308 + GR_PROTPROCFD = 0x00000400,
55309 + GR_PROCACCT = 0x00000800,
55310 + GR_RELAXPTRACE = 0x00001000,
55311 + GR_NESTED = 0x00002000,
55312 + GR_INHERITLEARN = 0x00004000,
55313 + GR_PROCFIND = 0x00008000,
55314 + GR_POVERRIDE = 0x00010000,
55315 + GR_KERNELAUTH = 0x00020000,
55316 + GR_ATSECURE = 0x00040000,
55317 + GR_SHMEXEC = 0x00080000
55318 +};
55319 +
55320 +enum {
55321 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
55322 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
55323 + GR_PAX_ENABLE_MPROTECT = 0x0004,
55324 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
55325 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
55326 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
55327 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
55328 + GR_PAX_DISABLE_MPROTECT = 0x0400,
55329 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
55330 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
55331 +};
55332 +
55333 +enum {
55334 + GR_ID_USER = 0x01,
55335 + GR_ID_GROUP = 0x02,
55336 +};
55337 +
55338 +enum {
55339 + GR_ID_ALLOW = 0x01,
55340 + GR_ID_DENY = 0x02,
55341 +};
55342 +
55343 +#define GR_CRASH_RES 31
55344 +#define GR_UIDTABLE_MAX 500
55345 +
55346 +/* begin resource learning section */
55347 +enum {
55348 + GR_RLIM_CPU_BUMP = 60,
55349 + GR_RLIM_FSIZE_BUMP = 50000,
55350 + GR_RLIM_DATA_BUMP = 10000,
55351 + GR_RLIM_STACK_BUMP = 1000,
55352 + GR_RLIM_CORE_BUMP = 10000,
55353 + GR_RLIM_RSS_BUMP = 500000,
55354 + GR_RLIM_NPROC_BUMP = 1,
55355 + GR_RLIM_NOFILE_BUMP = 5,
55356 + GR_RLIM_MEMLOCK_BUMP = 50000,
55357 + GR_RLIM_AS_BUMP = 500000,
55358 + GR_RLIM_LOCKS_BUMP = 2,
55359 + GR_RLIM_SIGPENDING_BUMP = 5,
55360 + GR_RLIM_MSGQUEUE_BUMP = 10000,
55361 + GR_RLIM_NICE_BUMP = 1,
55362 + GR_RLIM_RTPRIO_BUMP = 1,
55363 + GR_RLIM_RTTIME_BUMP = 1000000
55364 +};
55365 +
55366 +#endif
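
Note how the object access bits (GR_READ ... GR_LINK, bits 0-9) and the audit bits (GR_AUDIT_READ ... GR_AUDIT_LINK, bits 10-19) keep the same ordering, so a requested access mode can be mapped onto its audit mask with a plain shift. A compile-only sketch of that relationship, not part of the patch; it mirrors, but is not, the kernel's to_gr_audit(), and wants_audit is hypothetical:

#define GR_ACCESS_MASK  0x000003ffU        /* GR_READ .. GR_LINK      */
#define GR_AUDIT_SHIFT  10                 /* distance to GR_AUDIT_*  */

static unsigned int to_audit_model(unsigned int reqmode)
{
        return (reqmode & GR_ACCESS_MASK) << GR_AUDIT_SHIFT;
}

/* hypothetical check: does this object's mode request auditing of reqmode? */
static int wants_audit(unsigned int objmode, unsigned int reqmode)
{
        return (objmode & to_audit_model(reqmode)) != 0;
}
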
55367 diff -urNp linux-2.6.32.42/include/linux/grinternal.h linux-2.6.32.42/include/linux/grinternal.h
55368 --- linux-2.6.32.42/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
55369 +++ linux-2.6.32.42/include/linux/grinternal.h 2011-04-17 15:56:46.000000000 -0400
55370 @@ -0,0 +1,218 @@
55371 +#ifndef __GRINTERNAL_H
55372 +#define __GRINTERNAL_H
55373 +
55374 +#ifdef CONFIG_GRKERNSEC
55375 +
55376 +#include <linux/fs.h>
55377 +#include <linux/mnt_namespace.h>
55378 +#include <linux/nsproxy.h>
55379 +#include <linux/gracl.h>
55380 +#include <linux/grdefs.h>
55381 +#include <linux/grmsg.h>
55382 +
55383 +void gr_add_learn_entry(const char *fmt, ...)
55384 + __attribute__ ((format (printf, 1, 2)));
55385 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
55386 + const struct vfsmount *mnt);
55387 +__u32 gr_check_create(const struct dentry *new_dentry,
55388 + const struct dentry *parent,
55389 + const struct vfsmount *mnt, const __u32 mode);
55390 +int gr_check_protected_task(const struct task_struct *task);
55391 +__u32 to_gr_audit(const __u32 reqmode);
55392 +int gr_set_acls(const int type);
55393 +int gr_apply_subject_to_task(struct task_struct *task);
55394 +int gr_acl_is_enabled(void);
55395 +char gr_roletype_to_char(void);
55396 +
55397 +void gr_handle_alertkill(struct task_struct *task);
55398 +char *gr_to_filename(const struct dentry *dentry,
55399 + const struct vfsmount *mnt);
55400 +char *gr_to_filename1(const struct dentry *dentry,
55401 + const struct vfsmount *mnt);
55402 +char *gr_to_filename2(const struct dentry *dentry,
55403 + const struct vfsmount *mnt);
55404 +char *gr_to_filename3(const struct dentry *dentry,
55405 + const struct vfsmount *mnt);
55406 +
55407 +extern int grsec_enable_harden_ptrace;
55408 +extern int grsec_enable_link;
55409 +extern int grsec_enable_fifo;
55410 +extern int grsec_enable_execve;
55411 +extern int grsec_enable_shm;
55412 +extern int grsec_enable_execlog;
55413 +extern int grsec_enable_signal;
55414 +extern int grsec_enable_audit_ptrace;
55415 +extern int grsec_enable_forkfail;
55416 +extern int grsec_enable_time;
55417 +extern int grsec_enable_rofs;
55418 +extern int grsec_enable_chroot_shmat;
55419 +extern int grsec_enable_chroot_findtask;
55420 +extern int grsec_enable_chroot_mount;
55421 +extern int grsec_enable_chroot_double;
55422 +extern int grsec_enable_chroot_pivot;
55423 +extern int grsec_enable_chroot_chdir;
55424 +extern int grsec_enable_chroot_chmod;
55425 +extern int grsec_enable_chroot_mknod;
55426 +extern int grsec_enable_chroot_fchdir;
55427 +extern int grsec_enable_chroot_nice;
55428 +extern int grsec_enable_chroot_execlog;
55429 +extern int grsec_enable_chroot_caps;
55430 +extern int grsec_enable_chroot_sysctl;
55431 +extern int grsec_enable_chroot_unix;
55432 +extern int grsec_enable_tpe;
55433 +extern int grsec_tpe_gid;
55434 +extern int grsec_enable_tpe_all;
55435 +extern int grsec_enable_tpe_invert;
55436 +extern int grsec_enable_socket_all;
55437 +extern int grsec_socket_all_gid;
55438 +extern int grsec_enable_socket_client;
55439 +extern int grsec_socket_client_gid;
55440 +extern int grsec_enable_socket_server;
55441 +extern int grsec_socket_server_gid;
55442 +extern int grsec_audit_gid;
55443 +extern int grsec_enable_group;
55444 +extern int grsec_enable_audit_textrel;
55445 +extern int grsec_enable_log_rwxmaps;
55446 +extern int grsec_enable_mount;
55447 +extern int grsec_enable_chdir;
55448 +extern int grsec_resource_logging;
55449 +extern int grsec_enable_blackhole;
55450 +extern int grsec_lastack_retries;
55451 +extern int grsec_lock;
55452 +
55453 +extern spinlock_t grsec_alert_lock;
55454 +extern unsigned long grsec_alert_wtime;
55455 +extern unsigned long grsec_alert_fyet;
55456 +
55457 +extern spinlock_t grsec_audit_lock;
55458 +
55459 +extern rwlock_t grsec_exec_file_lock;
55460 +
55461 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
55462 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
55463 + (tsk)->exec_file->f_vfsmnt) : "/")
55464 +
55465 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
55466 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
55467 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55468 +
55469 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
55470 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
55471 + (tsk)->exec_file->f_vfsmnt) : "/")
55472 +
55473 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
55474 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
55475 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
55476 +
55477 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
55478 +
55479 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
55480 +
55481 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
55482 + (task)->pid, (cred)->uid, \
55483 + (cred)->euid, (cred)->gid, (cred)->egid, \
55484 + gr_parent_task_fullpath(task), \
55485 + (task)->real_parent->comm, (task)->real_parent->pid, \
55486 + (pcred)->uid, (pcred)->euid, \
55487 + (pcred)->gid, (pcred)->egid
55488 +
55489 +#define GR_CHROOT_CAPS {{ \
55490 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
55491 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
55492 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
55493 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
55494 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
55495 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
55496 +
55497 +#define security_learn(normal_msg,args...) \
55498 +({ \
55499 + read_lock(&grsec_exec_file_lock); \
55500 + gr_add_learn_entry(normal_msg "\n", ## args); \
55501 + read_unlock(&grsec_exec_file_lock); \
55502 +})
55503 +
55504 +enum {
55505 + GR_DO_AUDIT,
55506 + GR_DONT_AUDIT,
55507 + GR_DONT_AUDIT_GOOD
55508 +};
55509 +
55510 +enum {
55511 + GR_TTYSNIFF,
55512 + GR_RBAC,
55513 + GR_RBAC_STR,
55514 + GR_STR_RBAC,
55515 + GR_RBAC_MODE2,
55516 + GR_RBAC_MODE3,
55517 + GR_FILENAME,
55518 + GR_SYSCTL_HIDDEN,
55519 + GR_NOARGS,
55520 + GR_ONE_INT,
55521 + GR_ONE_INT_TWO_STR,
55522 + GR_ONE_STR,
55523 + GR_STR_INT,
55524 + GR_TWO_STR_INT,
55525 + GR_TWO_INT,
55526 + GR_TWO_U64,
55527 + GR_THREE_INT,
55528 + GR_FIVE_INT_TWO_STR,
55529 + GR_TWO_STR,
55530 + GR_THREE_STR,
55531 + GR_FOUR_STR,
55532 + GR_STR_FILENAME,
55533 + GR_FILENAME_STR,
55534 + GR_FILENAME_TWO_INT,
55535 + GR_FILENAME_TWO_INT_STR,
55536 + GR_TEXTREL,
55537 + GR_PTRACE,
55538 + GR_RESOURCE,
55539 + GR_CAP,
55540 + GR_SIG,
55541 + GR_SIG2,
55542 + GR_CRASH1,
55543 + GR_CRASH2,
55544 + GR_PSACCT,
55545 + GR_RWXMAP
55546 +};
55547 +
55548 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
55549 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
55550 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
55551 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
55552 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
55553 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
55554 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
55555 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
55556 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
55557 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
55558 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
55559 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
55560 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
55561 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
55562 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
55563 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
55564 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
55565 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
55566 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
55567 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
55568 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
55569 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
55570 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
55571 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
55572 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
55573 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
55574 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
55575 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
55576 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
55577 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
55578 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
55579 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
55580 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
55581 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
55582 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
55583 +
55584 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
55585 +
55586 +#endif
55587 +
55588 +#endif
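
All of the gr_log_* wrappers above funnel into gr_log_varargs() with an argument-type tag (the enum just before them), so a single formatter can decode the variadic arguments and append DEFAULTSECARGS for the acting task. A usage sketch under the declarations above, not part of the patch; the function name is hypothetical, though the grsecurity code pairs GR_CHDIR_AUDIT_MSG with gr_log_fs_generic() in much this way:

static void example_log_chdir(const struct dentry *dentry,
                              const struct vfsmount *mnt)
{
        /* expands to gr_log_varargs(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG,
         *                           GR_FILENAME, dentry, mnt)        */
        gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
}
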
55589 diff -urNp linux-2.6.32.42/include/linux/grmsg.h linux-2.6.32.42/include/linux/grmsg.h
55590 --- linux-2.6.32.42/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
55591 +++ linux-2.6.32.42/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
55592 @@ -0,0 +1,108 @@
55593 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
55594 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
55595 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
55596 +#define GR_STOPMOD_MSG "denied modification of module state by "
55597 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
55598 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
55599 +#define GR_IOPERM_MSG "denied use of ioperm() by "
55600 +#define GR_IOPL_MSG "denied use of iopl() by "
55601 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
55602 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
55603 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
55604 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
55605 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
55606 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
55607 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
55608 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
55609 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
55610 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
55611 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
55612 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
55613 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
55614 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
55615 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
55616 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
55617 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
55618 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
55619 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
55620 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
55621 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
55622 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
55623 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
55624 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
55625 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
55626 +#define GR_NPROC_MSG "denied overstep of process limit by "
55627 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
55628 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
55629 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
55630 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
55631 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
55632 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
55633 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
55634 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
55635 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
55636 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
55637 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
55638 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
55639 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
55640 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
55641 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
55642 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
55643 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
55644 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
55645 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
55646 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
55647 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
55648 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
55649 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
55650 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
55651 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
55652 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
55653 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
55654 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
55655 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
55656 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
55657 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
55658 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
55659 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
55660 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
55661 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
55662 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
55663 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
55664 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
55665 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
55666 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
55667 +#define GR_NICE_CHROOT_MSG "denied priority change by "
55668 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
55669 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
55670 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
55671 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
55672 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
55673 +#define GR_TIME_MSG "time set by "
55674 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
55675 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
55676 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
55677 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
55678 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
55679 +#define GR_BIND_MSG "denied bind() by "
55680 +#define GR_CONNECT_MSG "denied connect() by "
55681 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
55682 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
55683 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
55684 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
55685 +#define GR_CAP_ACL_MSG "use of %s denied for "
55686 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
55687 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
55688 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
55689 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
55690 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
55691 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
55692 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
55693 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
55694 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
55695 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
55696 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
55697 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
55698 +#define GR_VM86_MSG "denied use of vm86 by "
55699 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
55700 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
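
Nearly every format string above ends in "by " because the logger appends DEFAULTSECMSG, filled in from DEFAULTSECARGS() in grinternal.h with the acting task, its credentials and its parent. A userspace model of how one finished line is assembled, not part of the patch; the field values are invented and only the first half of DEFAULTSECMSG is modelled:

#include <stdio.h>

#define DEFAULTSECMSG_MODEL "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"

static void demo_denial_line(void)
{
        char buf[256];

        /* GR_IOPL_MSG ("denied use of iopl() by ") + task identification */
        snprintf(buf, sizeof(buf),
                 "denied use of iopl() by " DEFAULTSECMSG_MODEL,
                 "/usr/bin/example", "example", 1234,
                 1000u, 1000u, 1000u, 1000u);
        puts(buf);
}
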
55701 diff -urNp linux-2.6.32.42/include/linux/grsecurity.h linux-2.6.32.42/include/linux/grsecurity.h
55702 --- linux-2.6.32.42/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
55703 +++ linux-2.6.32.42/include/linux/grsecurity.h 2011-04-17 15:56:46.000000000 -0400
55704 @@ -0,0 +1,212 @@
55705 +#ifndef GR_SECURITY_H
55706 +#define GR_SECURITY_H
55707 +#include <linux/fs.h>
55708 +#include <linux/fs_struct.h>
55709 +#include <linux/binfmts.h>
55710 +#include <linux/gracl.h>
55711 +#include <linux/compat.h>
55712 +
55713 +/* notify of brain-dead configs */
55714 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
55715 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
55716 +#endif
55717 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55718 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55719 +#endif
55720 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
55721 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
55722 +#endif
55723 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
55724 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
55725 +#endif
55726 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
55727 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
55728 +#endif
55729 +
55730 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
55731 +void gr_handle_brute_check(void);
55732 +void gr_handle_kernel_exploit(void);
55733 +int gr_process_user_ban(void);
55734 +
55735 +char gr_roletype_to_char(void);
55736 +
55737 +int gr_acl_enable_at_secure(void);
55738 +
55739 +int gr_check_user_change(int real, int effective, int fs);
55740 +int gr_check_group_change(int real, int effective, int fs);
55741 +
55742 +void gr_del_task_from_ip_table(struct task_struct *p);
55743 +
55744 +int gr_pid_is_chrooted(struct task_struct *p);
55745 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
55746 +int gr_handle_chroot_nice(void);
55747 +int gr_handle_chroot_sysctl(const int op);
55748 +int gr_handle_chroot_setpriority(struct task_struct *p,
55749 + const int niceval);
55750 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
55751 +int gr_handle_chroot_chroot(const struct dentry *dentry,
55752 + const struct vfsmount *mnt);
55753 +int gr_handle_chroot_caps(struct path *path);
55754 +void gr_handle_chroot_chdir(struct path *path);
55755 +int gr_handle_chroot_chmod(const struct dentry *dentry,
55756 + const struct vfsmount *mnt, const int mode);
55757 +int gr_handle_chroot_mknod(const struct dentry *dentry,
55758 + const struct vfsmount *mnt, const int mode);
55759 +int gr_handle_chroot_mount(const struct dentry *dentry,
55760 + const struct vfsmount *mnt,
55761 + const char *dev_name);
55762 +int gr_handle_chroot_pivot(void);
55763 +int gr_handle_chroot_unix(const pid_t pid);
55764 +
55765 +int gr_handle_rawio(const struct inode *inode);
55766 +int gr_handle_nproc(void);
55767 +
55768 +void gr_handle_ioperm(void);
55769 +void gr_handle_iopl(void);
55770 +
55771 +int gr_tpe_allow(const struct file *file);
55772 +
55773 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
55774 +void gr_clear_chroot_entries(struct task_struct *task);
55775 +
55776 +void gr_log_forkfail(const int retval);
55777 +void gr_log_timechange(void);
55778 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
55779 +void gr_log_chdir(const struct dentry *dentry,
55780 + const struct vfsmount *mnt);
55781 +void gr_log_chroot_exec(const struct dentry *dentry,
55782 + const struct vfsmount *mnt);
55783 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
55784 +#ifdef CONFIG_COMPAT
55785 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
55786 +#endif
55787 +void gr_log_remount(const char *devname, const int retval);
55788 +void gr_log_unmount(const char *devname, const int retval);
55789 +void gr_log_mount(const char *from, const char *to, const int retval);
55790 +void gr_log_textrel(struct vm_area_struct *vma);
55791 +void gr_log_rwxmmap(struct file *file);
55792 +void gr_log_rwxmprotect(struct file *file);
55793 +
55794 +int gr_handle_follow_link(const struct inode *parent,
55795 + const struct inode *inode,
55796 + const struct dentry *dentry,
55797 + const struct vfsmount *mnt);
55798 +int gr_handle_fifo(const struct dentry *dentry,
55799 + const struct vfsmount *mnt,
55800 + const struct dentry *dir, const int flag,
55801 + const int acc_mode);
55802 +int gr_handle_hardlink(const struct dentry *dentry,
55803 + const struct vfsmount *mnt,
55804 + struct inode *inode,
55805 + const int mode, const char *to);
55806 +
55807 +int gr_is_capable(const int cap);
55808 +int gr_is_capable_nolog(const int cap);
55809 +void gr_learn_resource(const struct task_struct *task, const int limit,
55810 + const unsigned long wanted, const int gt);
55811 +void gr_copy_label(struct task_struct *tsk);
55812 +void gr_handle_crash(struct task_struct *task, const int sig);
55813 +int gr_handle_signal(const struct task_struct *p, const int sig);
55814 +int gr_check_crash_uid(const uid_t uid);
55815 +int gr_check_protected_task(const struct task_struct *task);
55816 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
55817 +int gr_acl_handle_mmap(const struct file *file,
55818 + const unsigned long prot);
55819 +int gr_acl_handle_mprotect(const struct file *file,
55820 + const unsigned long prot);
55821 +int gr_check_hidden_task(const struct task_struct *tsk);
55822 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
55823 + const struct vfsmount *mnt);
55824 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
55825 + const struct vfsmount *mnt);
55826 +__u32 gr_acl_handle_access(const struct dentry *dentry,
55827 + const struct vfsmount *mnt, const int fmode);
55828 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
55829 + const struct vfsmount *mnt, mode_t mode);
55830 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
55831 + const struct vfsmount *mnt, mode_t mode);
55832 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
55833 + const struct vfsmount *mnt);
55834 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
55835 + const struct vfsmount *mnt);
55836 +int gr_handle_ptrace(struct task_struct *task, const long request);
55837 +int gr_handle_proc_ptrace(struct task_struct *task);
55838 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
55839 + const struct vfsmount *mnt);
55840 +int gr_check_crash_exec(const struct file *filp);
55841 +int gr_acl_is_enabled(void);
55842 +void gr_set_kernel_label(struct task_struct *task);
55843 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
55844 + const gid_t gid);
55845 +int gr_set_proc_label(const struct dentry *dentry,
55846 + const struct vfsmount *mnt,
55847 + const int unsafe_share);
55848 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
55849 + const struct vfsmount *mnt);
55850 +__u32 gr_acl_handle_open(const struct dentry *dentry,
55851 + const struct vfsmount *mnt, const int fmode);
55852 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
55853 + const struct dentry *p_dentry,
55854 + const struct vfsmount *p_mnt, const int fmode,
55855 + const int imode);
55856 +void gr_handle_create(const struct dentry *dentry,
55857 + const struct vfsmount *mnt);
55858 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
55859 + const struct dentry *parent_dentry,
55860 + const struct vfsmount *parent_mnt,
55861 + const int mode);
55862 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
55863 + const struct dentry *parent_dentry,
55864 + const struct vfsmount *parent_mnt);
55865 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
55866 + const struct vfsmount *mnt);
55867 +void gr_handle_delete(const ino_t ino, const dev_t dev);
55868 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
55869 + const struct vfsmount *mnt);
55870 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
55871 + const struct dentry *parent_dentry,
55872 + const struct vfsmount *parent_mnt,
55873 + const char *from);
55874 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
55875 + const struct dentry *parent_dentry,
55876 + const struct vfsmount *parent_mnt,
55877 + const struct dentry *old_dentry,
55878 + const struct vfsmount *old_mnt, const char *to);
55879 +int gr_acl_handle_rename(struct dentry *new_dentry,
55880 + struct dentry *parent_dentry,
55881 + const struct vfsmount *parent_mnt,
55882 + struct dentry *old_dentry,
55883 + struct inode *old_parent_inode,
55884 + struct vfsmount *old_mnt, const char *newname);
55885 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
55886 + struct dentry *old_dentry,
55887 + struct dentry *new_dentry,
55888 + struct vfsmount *mnt, const __u8 replace);
55889 +__u32 gr_check_link(const struct dentry *new_dentry,
55890 + const struct dentry *parent_dentry,
55891 + const struct vfsmount *parent_mnt,
55892 + const struct dentry *old_dentry,
55893 + const struct vfsmount *old_mnt);
55894 +int gr_acl_handle_filldir(const struct file *file, const char *name,
55895 + const unsigned int namelen, const ino_t ino);
55896 +
55897 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
55898 + const struct vfsmount *mnt);
55899 +void gr_acl_handle_exit(void);
55900 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
55901 +int gr_acl_handle_procpidmem(const struct task_struct *task);
55902 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
55903 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
55904 +void gr_audit_ptrace(struct task_struct *task);
55905 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
55906 +
55907 +#ifdef CONFIG_GRKERNSEC
55908 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
55909 +void gr_handle_vm86(void);
55910 +void gr_handle_mem_readwrite(u64 from, u64 to);
55911 +
55912 +extern int grsec_enable_dmesg;
55913 +extern int grsec_disable_privio;
55914 +#endif
55915 +
55916 +#endif
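
The block of #error checks at the top of this new header rejects configurations in which a master option is enabled without any of the features that implement it. Below is a minimal standalone sketch of that compile-time validation pattern; FEATURE_MASTER, FEATURE_IMPL_A and FEATURE_IMPL_B are made-up option names, not the CONFIG_PAX_* symbols above.

/* Compile-time configuration sanity check, modelled on the #error guards above. */
#define FEATURE_MASTER 1
#define FEATURE_IMPL_A 1
/* #define FEATURE_IMPL_B 1 */

#if defined(FEATURE_MASTER) && !defined(FEATURE_IMPL_A) && !defined(FEATURE_IMPL_B)
#error "FEATURE_MASTER enabled, but neither FEATURE_IMPL_A nor FEATURE_IMPL_B is enabled."
#endif

int main(void)
{
        return 0;       /* commenting out FEATURE_IMPL_A above makes the build fail instead */
}
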
55917 diff -urNp linux-2.6.32.42/include/linux/hdpu_features.h linux-2.6.32.42/include/linux/hdpu_features.h
55918 --- linux-2.6.32.42/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
55919 +++ linux-2.6.32.42/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
55920 @@ -3,7 +3,7 @@
55921 struct cpustate_t {
55922 spinlock_t lock;
55923 int excl;
55924 - int open_count;
55925 + atomic_t open_count;
55926 unsigned char cached_val;
55927 int inited;
55928 unsigned long *set_addr;
55929 diff -urNp linux-2.6.32.42/include/linux/highmem.h linux-2.6.32.42/include/linux/highmem.h
55930 --- linux-2.6.32.42/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
55931 +++ linux-2.6.32.42/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
55932 @@ -137,6 +137,18 @@ static inline void clear_highpage(struct
55933 kunmap_atomic(kaddr, KM_USER0);
55934 }
55935
55936 +static inline void sanitize_highpage(struct page *page)
55937 +{
55938 + void *kaddr;
55939 + unsigned long flags;
55940 +
55941 + local_irq_save(flags);
55942 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
55943 + clear_page(kaddr);
55944 + kunmap_atomic(kaddr, KM_CLEARPAGE);
55945 + local_irq_restore(flags);
55946 +}
55947 +
55948 static inline void zero_user_segments(struct page *page,
55949 unsigned start1, unsigned end1,
55950 unsigned start2, unsigned end2)
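
sanitize_highpage() clears a page with interrupts disabled so freed pages do not keep their old contents. As a loose userspace analogue (my own sketch, not the kernel API), zeroing a buffer through a volatile pointer keeps the compiler from discarding the clear as a dead store before free():

#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Zero a buffer in a way the optimizer should not drop as a dead store. */
static void sanitize_buffer(void *buf, size_t len)
{
        volatile unsigned char *p = buf;

        while (len--)
                *p++ = 0;
}

int main(void)
{
        char *secret = malloc(64);

        if (!secret)
                return 1;
        strcpy(secret, "stale data we do not want lingering");
        sanitize_buffer(secret, 64);    /* analogue of clearing the page before reuse */
        free(secret);
        printf("buffer sanitized before free\n");
        return 0;
}
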
55951 diff -urNp linux-2.6.32.42/include/linux/i2o.h linux-2.6.32.42/include/linux/i2o.h
55952 --- linux-2.6.32.42/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
55953 +++ linux-2.6.32.42/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
55954 @@ -564,7 +564,7 @@ struct i2o_controller {
55955 struct i2o_device *exec; /* Executive */
55956 #if BITS_PER_LONG == 64
55957 spinlock_t context_list_lock; /* lock for context_list */
55958 - atomic_t context_list_counter; /* needed for unique contexts */
55959 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
55960 struct list_head context_list; /* list of context id's
55961 and pointers */
55962 #endif
55963 diff -urNp linux-2.6.32.42/include/linux/init_task.h linux-2.6.32.42/include/linux/init_task.h
55964 --- linux-2.6.32.42/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
55965 +++ linux-2.6.32.42/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
55966 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
55967 #define INIT_IDS
55968 #endif
55969
55970 +#ifdef CONFIG_X86
55971 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
55972 +#else
55973 +#define INIT_TASK_THREAD_INFO
55974 +#endif
55975 +
55976 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
55977 /*
55978 * Because of the reduced scope of CAP_SETPCAP when filesystem
55979 @@ -156,6 +162,7 @@ extern struct cred init_cred;
55980 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
55981 .comm = "swapper", \
55982 .thread = INIT_THREAD, \
55983 + INIT_TASK_THREAD_INFO \
55984 .fs = &init_fs, \
55985 .files = &init_files, \
55986 .signal = &init_signals, \
55987 diff -urNp linux-2.6.32.42/include/linux/interrupt.h linux-2.6.32.42/include/linux/interrupt.h
55988 --- linux-2.6.32.42/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
55989 +++ linux-2.6.32.42/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
55990 @@ -363,7 +363,7 @@ enum
55991 /* map softirq index to softirq name. update 'softirq_to_name' in
55992 * kernel/softirq.c when adding a new softirq.
55993 */
55994 -extern char *softirq_to_name[NR_SOFTIRQS];
55995 +extern const char * const softirq_to_name[NR_SOFTIRQS];
55996
55997 /* softirq mask and active fields moved to irq_cpustat_t in
55998 * asm/hardirq.h to get better cache usage. KAO
55999 @@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
56000
56001 struct softirq_action
56002 {
56003 - void (*action)(struct softirq_action *);
56004 + void (*action)(void);
56005 };
56006
56007 asmlinkage void do_softirq(void);
56008 asmlinkage void __do_softirq(void);
56009 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
56010 +extern void open_softirq(int nr, void (*action)(void));
56011 extern void softirq_init(void);
56012 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
56013 extern void raise_softirq_irqoff(unsigned int nr);
56014 diff -urNp linux-2.6.32.42/include/linux/irq.h linux-2.6.32.42/include/linux/irq.h
56015 --- linux-2.6.32.42/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
56016 +++ linux-2.6.32.42/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
56017 @@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
56018 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
56019 bool boot)
56020 {
56021 +#ifdef CONFIG_CPUMASK_OFFSTACK
56022 gfp_t gfp = GFP_ATOMIC;
56023
56024 if (boot)
56025 gfp = GFP_NOWAIT;
56026
56027 -#ifdef CONFIG_CPUMASK_OFFSTACK
56028 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
56029 return false;
56030
56031 diff -urNp linux-2.6.32.42/include/linux/kallsyms.h linux-2.6.32.42/include/linux/kallsyms.h
56032 --- linux-2.6.32.42/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
56033 +++ linux-2.6.32.42/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
56034 @@ -15,7 +15,8 @@
56035
56036 struct module;
56037
56038 -#ifdef CONFIG_KALLSYMS
56039 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
56040 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
56041 /* Lookup the address for a symbol. Returns 0 if not found. */
56042 unsigned long kallsyms_lookup_name(const char *name);
56043
56044 @@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
56045 /* Stupid that this does nothing, but I didn't create this mess. */
56046 #define __print_symbol(fmt, addr)
56047 #endif /*CONFIG_KALLSYMS*/
56048 +#else /* when included by kallsyms.c, vsnprintf.c, or
56049 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
56050 +extern void __print_symbol(const char *fmt, unsigned long address);
56051 +extern int sprint_symbol(char *buffer, unsigned long address);
56052 +const char *kallsyms_lookup(unsigned long addr,
56053 + unsigned long *symbolsize,
56054 + unsigned long *offset,
56055 + char **modname, char *namebuf);
56056 +#endif
56057
56058 /* This macro allows us to keep printk typechecking */
56059 static void __check_printsym_format(const char *fmt, ...)
56060 diff -urNp linux-2.6.32.42/include/linux/kgdb.h linux-2.6.32.42/include/linux/kgdb.h
56061 --- linux-2.6.32.42/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
56062 +++ linux-2.6.32.42/include/linux/kgdb.h 2011-05-04 17:56:20.000000000 -0400
56063 @@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
56064
56065 extern int kgdb_connected;
56066
56067 -extern atomic_t kgdb_setting_breakpoint;
56068 -extern atomic_t kgdb_cpu_doing_single_step;
56069 +extern atomic_unchecked_t kgdb_setting_breakpoint;
56070 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
56071
56072 extern struct task_struct *kgdb_usethread;
56073 extern struct task_struct *kgdb_contthread;
56074 @@ -251,20 +251,20 @@ struct kgdb_arch {
56075 */
56076 struct kgdb_io {
56077 const char *name;
56078 - int (*read_char) (void);
56079 - void (*write_char) (u8);
56080 - void (*flush) (void);
56081 - int (*init) (void);
56082 - void (*pre_exception) (void);
56083 - void (*post_exception) (void);
56084 + int (* const read_char) (void);
56085 + void (* const write_char) (u8);
56086 + void (* const flush) (void);
56087 + int (* const init) (void);
56088 + void (* const pre_exception) (void);
56089 + void (* const post_exception) (void);
56090 };
56091
56092 -extern struct kgdb_arch arch_kgdb_ops;
56093 +extern const struct kgdb_arch arch_kgdb_ops;
56094
56095 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
56096
56097 -extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
56098 -extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
56099 +extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
56100 +extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
56101
56102 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
56103 extern int kgdb_mem2hex(char *mem, char *buf, int count);
56104 diff -urNp linux-2.6.32.42/include/linux/kmod.h linux-2.6.32.42/include/linux/kmod.h
56105 --- linux-2.6.32.42/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
56106 +++ linux-2.6.32.42/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
56107 @@ -31,6 +31,8 @@
56108 * usually useless though. */
56109 extern int __request_module(bool wait, const char *name, ...) \
56110 __attribute__((format(printf, 2, 3)));
56111 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
56112 + __attribute__((format(printf, 3, 4)));
56113 #define request_module(mod...) __request_module(true, mod)
56114 #define request_module_nowait(mod...) __request_module(false, mod)
56115 #define try_then_request_module(x, mod...) \
56116 diff -urNp linux-2.6.32.42/include/linux/kobject.h linux-2.6.32.42/include/linux/kobject.h
56117 --- linux-2.6.32.42/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
56118 +++ linux-2.6.32.42/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
56119 @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
56120
56121 struct kobj_type {
56122 void (*release)(struct kobject *kobj);
56123 - struct sysfs_ops *sysfs_ops;
56124 + const struct sysfs_ops *sysfs_ops;
56125 struct attribute **default_attrs;
56126 };
56127
56128 @@ -118,9 +118,9 @@ struct kobj_uevent_env {
56129 };
56130
56131 struct kset_uevent_ops {
56132 - int (*filter)(struct kset *kset, struct kobject *kobj);
56133 - const char *(*name)(struct kset *kset, struct kobject *kobj);
56134 - int (*uevent)(struct kset *kset, struct kobject *kobj,
56135 + int (* const filter)(struct kset *kset, struct kobject *kobj);
56136 + const char *(* const name)(struct kset *kset, struct kobject *kobj);
56137 + int (* const uevent)(struct kset *kset, struct kobject *kobj,
56138 struct kobj_uevent_env *env);
56139 };
56140
56141 @@ -132,7 +132,7 @@ struct kobj_attribute {
56142 const char *buf, size_t count);
56143 };
56144
56145 -extern struct sysfs_ops kobj_sysfs_ops;
56146 +extern const struct sysfs_ops kobj_sysfs_ops;
56147
56148 /**
56149 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
56150 @@ -155,14 +155,14 @@ struct kset {
56151 struct list_head list;
56152 spinlock_t list_lock;
56153 struct kobject kobj;
56154 - struct kset_uevent_ops *uevent_ops;
56155 + const struct kset_uevent_ops *uevent_ops;
56156 };
56157
56158 extern void kset_init(struct kset *kset);
56159 extern int __must_check kset_register(struct kset *kset);
56160 extern void kset_unregister(struct kset *kset);
56161 extern struct kset * __must_check kset_create_and_add(const char *name,
56162 - struct kset_uevent_ops *u,
56163 + const struct kset_uevent_ops *u,
56164 struct kobject *parent_kobj);
56165
56166 static inline struct kset *to_kset(struct kobject *kobj)
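
Several hunks here and below constify operation tables (sysfs_ops, kset_uevent_ops, and later kgdb_io and item_operations), so the function pointers can live in read-only memory and cannot be retargeted after initialization. A small sketch of that pattern with made-up ops names:

#include <stdio.h>

struct demo_ops {
        int (* const show)(const char *name);   /* const pointer members ... */
        int (* const store)(const char *name);  /* ... cannot be reassigned later */
};

static int demo_show(const char *name)  { return printf("show %s\n", name); }
static int demo_store(const char *name) { return printf("store %s\n", name); }

/* The whole table is const, so it can be placed in a read-only section. */
static const struct demo_ops default_ops = {
        .show  = demo_show,
        .store = demo_store,
};

int main(void)
{
        default_ops.show("example");
        default_ops.store("example");
        /* default_ops.show = NULL;  would not compile: the member is read-only */
        return 0;
}
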
56167 diff -urNp linux-2.6.32.42/include/linux/kvm_host.h linux-2.6.32.42/include/linux/kvm_host.h
56168 --- linux-2.6.32.42/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
56169 +++ linux-2.6.32.42/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
56170 @@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
56171 void vcpu_load(struct kvm_vcpu *vcpu);
56172 void vcpu_put(struct kvm_vcpu *vcpu);
56173
56174 -int kvm_init(void *opaque, unsigned int vcpu_size,
56175 +int kvm_init(const void *opaque, unsigned int vcpu_size,
56176 struct module *module);
56177 void kvm_exit(void);
56178
56179 @@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
56180 struct kvm_guest_debug *dbg);
56181 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
56182
56183 -int kvm_arch_init(void *opaque);
56184 +int kvm_arch_init(const void *opaque);
56185 void kvm_arch_exit(void);
56186
56187 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
56188 diff -urNp linux-2.6.32.42/include/linux/libata.h linux-2.6.32.42/include/linux/libata.h
56189 --- linux-2.6.32.42/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
56190 +++ linux-2.6.32.42/include/linux/libata.h 2011-04-23 12:56:11.000000000 -0400
56191 @@ -525,11 +525,11 @@ struct ata_ioports {
56192
56193 struct ata_host {
56194 spinlock_t lock;
56195 - struct device *dev;
56196 + struct device *dev;
56197 void __iomem * const *iomap;
56198 unsigned int n_ports;
56199 void *private_data;
56200 - struct ata_port_operations *ops;
56201 + const struct ata_port_operations *ops;
56202 unsigned long flags;
56203 #ifdef CONFIG_ATA_ACPI
56204 acpi_handle acpi_handle;
56205 @@ -710,7 +710,7 @@ struct ata_link {
56206
56207 struct ata_port {
56208 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
56209 - struct ata_port_operations *ops;
56210 + const struct ata_port_operations *ops;
56211 spinlock_t *lock;
56212 /* Flags owned by the EH context. Only EH should touch these once the
56213 port is active */
56214 @@ -892,7 +892,7 @@ struct ata_port_info {
56215 unsigned long pio_mask;
56216 unsigned long mwdma_mask;
56217 unsigned long udma_mask;
56218 - struct ata_port_operations *port_ops;
56219 + const struct ata_port_operations *port_ops;
56220 void *private_data;
56221 };
56222
56223 @@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
56224 extern const unsigned long sata_deb_timing_hotplug[];
56225 extern const unsigned long sata_deb_timing_long[];
56226
56227 -extern struct ata_port_operations ata_dummy_port_ops;
56228 +extern const struct ata_port_operations ata_dummy_port_ops;
56229 extern const struct ata_port_info ata_dummy_port_info;
56230
56231 static inline const unsigned long *
56232 @@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
56233 struct scsi_host_template *sht);
56234 extern void ata_host_detach(struct ata_host *host);
56235 extern void ata_host_init(struct ata_host *, struct device *,
56236 - unsigned long, struct ata_port_operations *);
56237 + unsigned long, const struct ata_port_operations *);
56238 extern int ata_scsi_detect(struct scsi_host_template *sht);
56239 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
56240 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
56241 diff -urNp linux-2.6.32.42/include/linux/lockd/bind.h linux-2.6.32.42/include/linux/lockd/bind.h
56242 --- linux-2.6.32.42/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
56243 +++ linux-2.6.32.42/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
56244 @@ -23,13 +23,13 @@ struct svc_rqst;
56245 * This is the set of functions for lockd->nfsd communication
56246 */
56247 struct nlmsvc_binding {
56248 - __be32 (*fopen)(struct svc_rqst *,
56249 + __be32 (* const fopen)(struct svc_rqst *,
56250 struct nfs_fh *,
56251 struct file **);
56252 - void (*fclose)(struct file *);
56253 + void (* const fclose)(struct file *);
56254 };
56255
56256 -extern struct nlmsvc_binding * nlmsvc_ops;
56257 +extern const struct nlmsvc_binding * nlmsvc_ops;
56258
56259 /*
56260 * Similar to nfs_client_initdata, but without the NFS-specific
56261 diff -urNp linux-2.6.32.42/include/linux/mm.h linux-2.6.32.42/include/linux/mm.h
56262 --- linux-2.6.32.42/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
56263 +++ linux-2.6.32.42/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
56264 @@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
56265
56266 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
56267 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
56268 +
56269 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
56270 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
56271 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
56272 +#else
56273 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
56274 +#endif
56275 +
56276 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
56277 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
56278
56279 @@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
56280 int set_page_dirty_lock(struct page *page);
56281 int clear_page_dirty_for_io(struct page *page);
56282
56283 -/* Is the vma a continuation of the stack vma above it? */
56284 -static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
56285 -{
56286 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
56287 -}
56288 -
56289 extern unsigned long move_page_tables(struct vm_area_struct *vma,
56290 unsigned long old_addr, struct vm_area_struct *new_vma,
56291 unsigned long new_addr, unsigned long len);
56292 @@ -890,6 +891,8 @@ struct shrinker {
56293 extern void register_shrinker(struct shrinker *);
56294 extern void unregister_shrinker(struct shrinker *);
56295
56296 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
56297 +
56298 int vma_wants_writenotify(struct vm_area_struct *vma);
56299
56300 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
56301 @@ -1162,6 +1165,7 @@ out:
56302 }
56303
56304 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
56305 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
56306
56307 extern unsigned long do_brk(unsigned long, unsigned long);
56308
56309 @@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
56310 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
56311 struct vm_area_struct **pprev);
56312
56313 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
56314 +extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
56315 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
56316 +
56317 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
56318 NULL if none. Assume start_addr < end_addr. */
56319 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
56320 @@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
56321 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
56322 }
56323
56324 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
56325 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
56326 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
56327 unsigned long pfn, unsigned long size, pgprot_t);
56328 @@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
56329 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
56330 extern int sysctl_memory_failure_early_kill;
56331 extern int sysctl_memory_failure_recovery;
56332 -extern atomic_long_t mce_bad_pages;
56333 +extern atomic_long_unchecked_t mce_bad_pages;
56334 +
56335 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56336 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
56337 +#else
56338 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
56339 +#endif
56340
56341 #endif /* __KERNEL__ */
56342 #endif /* _LINUX_MM_H */
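
track_exec_limit() follows a common kernel idiom: a real declaration when CONFIG_ARCH_TRACK_EXEC_LIMIT is set, and an empty static inline otherwise, so call sites never need an #ifdef. A standalone sketch of the same stub pattern, using a hypothetical FEATURE_TRACE option:

#include <stdio.h>

/* #define FEATURE_TRACE 1 */

#ifdef FEATURE_TRACE
void trace_event(const char *what);                                  /* real version elsewhere */
#else
static inline void trace_event(const char *what) { (void)what; }     /* compiles away */
#endif

int main(void)
{
        trace_event("mmap");            /* call sites stay free of #ifdefs */
        printf("done\n");
        return 0;
}
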
56343 diff -urNp linux-2.6.32.42/include/linux/mm_types.h linux-2.6.32.42/include/linux/mm_types.h
56344 --- linux-2.6.32.42/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
56345 +++ linux-2.6.32.42/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
56346 @@ -186,6 +186,8 @@ struct vm_area_struct {
56347 #ifdef CONFIG_NUMA
56348 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
56349 #endif
56350 +
56351 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
56352 };
56353
56354 struct core_thread {
56355 @@ -287,6 +289,24 @@ struct mm_struct {
56356 #ifdef CONFIG_MMU_NOTIFIER
56357 struct mmu_notifier_mm *mmu_notifier_mm;
56358 #endif
56359 +
56360 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
56361 + unsigned long pax_flags;
56362 +#endif
56363 +
56364 +#ifdef CONFIG_PAX_DLRESOLVE
56365 + unsigned long call_dl_resolve;
56366 +#endif
56367 +
56368 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
56369 + unsigned long call_syscall;
56370 +#endif
56371 +
56372 +#ifdef CONFIG_PAX_ASLR
56373 + unsigned long delta_mmap; /* randomized offset */
56374 + unsigned long delta_stack; /* randomized offset */
56375 +#endif
56376 +
56377 };
56378
56379 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
56380 diff -urNp linux-2.6.32.42/include/linux/mmu_notifier.h linux-2.6.32.42/include/linux/mmu_notifier.h
56381 --- linux-2.6.32.42/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
56382 +++ linux-2.6.32.42/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
56383 @@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
56384 */
56385 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
56386 ({ \
56387 - pte_t __pte; \
56388 + pte_t ___pte; \
56389 struct vm_area_struct *___vma = __vma; \
56390 unsigned long ___address = __address; \
56391 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
56392 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
56393 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
56394 - __pte; \
56395 + ___pte; \
56396 })
56397
56398 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
56399 diff -urNp linux-2.6.32.42/include/linux/mmzone.h linux-2.6.32.42/include/linux/mmzone.h
56400 --- linux-2.6.32.42/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
56401 +++ linux-2.6.32.42/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
56402 @@ -350,7 +350,7 @@ struct zone {
56403 unsigned long flags; /* zone flags, see below */
56404
56405 /* Zone statistics */
56406 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56407 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
56408
56409 /*
56410 * prev_priority holds the scanning priority for this zone. It is
56411 diff -urNp linux-2.6.32.42/include/linux/mod_devicetable.h linux-2.6.32.42/include/linux/mod_devicetable.h
56412 --- linux-2.6.32.42/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
56413 +++ linux-2.6.32.42/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
56414 @@ -12,7 +12,7 @@
56415 typedef unsigned long kernel_ulong_t;
56416 #endif
56417
56418 -#define PCI_ANY_ID (~0)
56419 +#define PCI_ANY_ID ((__u16)~0)
56420
56421 struct pci_device_id {
56422 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
56423 @@ -131,7 +131,7 @@ struct usb_device_id {
56424 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
56425 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
56426
56427 -#define HID_ANY_ID (~0)
56428 +#define HID_ANY_ID (~0U)
56429
56430 struct hid_device_id {
56431 __u16 bus;
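
Giving PCI_ANY_ID and HID_ANY_ID an explicit unsigned type matters because a bare ~0 is a signed int with value -1, and comparing that against a narrower unsigned field can behave differently from a properly sized all-ones constant. A small illustration (my own example, not the kernel headers):

#include <stdio.h>

int main(void)
{
        unsigned short id = 0xFFFF;     /* a 16-bit "match anything" field */

        /* id promotes to int 0x0000FFFF while ~0 is int -1 (0xFFFFFFFF): not equal. */
        printf("id == ~0                 -> %d\n", id == ~0);
        /* With an explicit cast both sides are 0xFFFF, so the wildcard matches. */
        printf("id == (unsigned short)~0 -> %d\n", id == (unsigned short)~0);
        return 0;
}
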
56432 diff -urNp linux-2.6.32.42/include/linux/module.h linux-2.6.32.42/include/linux/module.h
56433 --- linux-2.6.32.42/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
56434 +++ linux-2.6.32.42/include/linux/module.h 2011-04-17 15:56:46.000000000 -0400
56435 @@ -287,16 +287,16 @@ struct module
56436 int (*init)(void);
56437
56438 /* If this is non-NULL, vfree after init() returns */
56439 - void *module_init;
56440 + void *module_init_rx, *module_init_rw;
56441
56442 /* Here is the actual code + data, vfree'd on unload. */
56443 - void *module_core;
56444 + void *module_core_rx, *module_core_rw;
56445
56446 /* Here are the sizes of the init and core sections */
56447 - unsigned int init_size, core_size;
56448 + unsigned int init_size_rw, core_size_rw;
56449
56450 /* The size of the executable code in each section. */
56451 - unsigned int init_text_size, core_text_size;
56452 + unsigned int init_size_rx, core_size_rx;
56453
56454 /* Arch-specific module values */
56455 struct mod_arch_specific arch;
56456 @@ -393,16 +393,46 @@ struct module *__module_address(unsigned
56457 bool is_module_address(unsigned long addr);
56458 bool is_module_text_address(unsigned long addr);
56459
56460 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
56461 +{
56462 +
56463 +#ifdef CONFIG_PAX_KERNEXEC
56464 + if (ktla_ktva(addr) >= (unsigned long)start &&
56465 + ktla_ktva(addr) < (unsigned long)start + size)
56466 + return 1;
56467 +#endif
56468 +
56469 + return ((void *)addr >= start && (void *)addr < start + size);
56470 +}
56471 +
56472 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
56473 +{
56474 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
56475 +}
56476 +
56477 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
56478 +{
56479 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
56480 +}
56481 +
56482 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
56483 +{
56484 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
56485 +}
56486 +
56487 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
56488 +{
56489 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
56490 +}
56491 +
56492 static inline int within_module_core(unsigned long addr, struct module *mod)
56493 {
56494 - return (unsigned long)mod->module_core <= addr &&
56495 - addr < (unsigned long)mod->module_core + mod->core_size;
56496 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
56497 }
56498
56499 static inline int within_module_init(unsigned long addr, struct module *mod)
56500 {
56501 - return (unsigned long)mod->module_init <= addr &&
56502 - addr < (unsigned long)mod->module_init + mod->init_size;
56503 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
56504 }
56505
56506 /* Search for module by name: must hold module_mutex. */
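
within_module_core() and within_module_init() are rebuilt on a single half-open range test applied once per RX and once per RW region. A standalone sketch of that helper with stand-in regions:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Half-open interval test: start <= addr < start + size. */
static int within_range(uintptr_t addr, const void *start, size_t size)
{
        uintptr_t s = (uintptr_t)start;

        return addr >= s && addr < s + size;
}

/* Stand-ins for a module's separate RX (code) and RW (data) allocations. */
static char region_rx[64];
static char region_rw[128];

static int within_core(uintptr_t addr)
{
        return within_range(addr, region_rx, sizeof(region_rx)) ||
               within_range(addr, region_rw, sizeof(region_rw));
}

int main(void)
{
        printf("%d %d\n",
               within_core((uintptr_t)&region_rx[10]),                    /* 1: inside RX */
               within_core((uintptr_t)(region_rw + sizeof(region_rw))));  /* 0: end is exclusive */
        return 0;
}
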
56507 diff -urNp linux-2.6.32.42/include/linux/moduleloader.h linux-2.6.32.42/include/linux/moduleloader.h
56508 --- linux-2.6.32.42/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
56509 +++ linux-2.6.32.42/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
56510 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
56511 sections. Returns NULL on failure. */
56512 void *module_alloc(unsigned long size);
56513
56514 +#ifdef CONFIG_PAX_KERNEXEC
56515 +void *module_alloc_exec(unsigned long size);
56516 +#else
56517 +#define module_alloc_exec(x) module_alloc(x)
56518 +#endif
56519 +
56520 /* Free memory returned from module_alloc. */
56521 void module_free(struct module *mod, void *module_region);
56522
56523 +#ifdef CONFIG_PAX_KERNEXEC
56524 +void module_free_exec(struct module *mod, void *module_region);
56525 +#else
56526 +#define module_free_exec(x, y) module_free((x), (y))
56527 +#endif
56528 +
56529 /* Apply the given relocation to the (simplified) ELF. Return -error
56530 or 0. */
56531 int apply_relocate(Elf_Shdr *sechdrs,
56532 diff -urNp linux-2.6.32.42/include/linux/moduleparam.h linux-2.6.32.42/include/linux/moduleparam.h
56533 --- linux-2.6.32.42/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
56534 +++ linux-2.6.32.42/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
56535 @@ -132,7 +132,7 @@ struct kparam_array
56536
56537 /* Actually copy string: maxlen param is usually sizeof(string). */
56538 #define module_param_string(name, string, len, perm) \
56539 - static const struct kparam_string __param_string_##name \
56540 + static const struct kparam_string __param_string_##name __used \
56541 = { len, string }; \
56542 __module_param_call(MODULE_PARAM_PREFIX, name, \
56543 param_set_copystring, param_get_string, \
56544 @@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
56545
56546 /* Comma-separated array: *nump is set to number they actually specified. */
56547 #define module_param_array_named(name, array, type, nump, perm) \
56548 - static const struct kparam_array __param_arr_##name \
56549 + static const struct kparam_array __param_arr_##name __used \
56550 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
56551 sizeof(array[0]), array }; \
56552 __module_param_call(MODULE_PARAM_PREFIX, name, \
56553 diff -urNp linux-2.6.32.42/include/linux/mutex.h linux-2.6.32.42/include/linux/mutex.h
56554 --- linux-2.6.32.42/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
56555 +++ linux-2.6.32.42/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
56556 @@ -51,7 +51,7 @@ struct mutex {
56557 spinlock_t wait_lock;
56558 struct list_head wait_list;
56559 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
56560 - struct thread_info *owner;
56561 + struct task_struct *owner;
56562 #endif
56563 #ifdef CONFIG_DEBUG_MUTEXES
56564 const char *name;
56565 diff -urNp linux-2.6.32.42/include/linux/namei.h linux-2.6.32.42/include/linux/namei.h
56566 --- linux-2.6.32.42/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
56567 +++ linux-2.6.32.42/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
56568 @@ -22,7 +22,7 @@ struct nameidata {
56569 unsigned int flags;
56570 int last_type;
56571 unsigned depth;
56572 - char *saved_names[MAX_NESTED_LINKS + 1];
56573 + const char *saved_names[MAX_NESTED_LINKS + 1];
56574
56575 /* Intent data */
56576 union {
56577 @@ -84,12 +84,12 @@ extern int follow_up(struct path *);
56578 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
56579 extern void unlock_rename(struct dentry *, struct dentry *);
56580
56581 -static inline void nd_set_link(struct nameidata *nd, char *path)
56582 +static inline void nd_set_link(struct nameidata *nd, const char *path)
56583 {
56584 nd->saved_names[nd->depth] = path;
56585 }
56586
56587 -static inline char *nd_get_link(struct nameidata *nd)
56588 +static inline const char *nd_get_link(const struct nameidata *nd)
56589 {
56590 return nd->saved_names[nd->depth];
56591 }
56592 diff -urNp linux-2.6.32.42/include/linux/netfilter/xt_gradm.h linux-2.6.32.42/include/linux/netfilter/xt_gradm.h
56593 --- linux-2.6.32.42/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
56594 +++ linux-2.6.32.42/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
56595 @@ -0,0 +1,9 @@
56596 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
56597 +#define _LINUX_NETFILTER_XT_GRADM_H 1
56598 +
56599 +struct xt_gradm_mtinfo {
56600 + __u16 flags;
56601 + __u16 invflags;
56602 +};
56603 +
56604 +#endif
56605 diff -urNp linux-2.6.32.42/include/linux/nodemask.h linux-2.6.32.42/include/linux/nodemask.h
56606 --- linux-2.6.32.42/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
56607 +++ linux-2.6.32.42/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
56608 @@ -464,11 +464,11 @@ static inline int num_node_state(enum no
56609
56610 #define any_online_node(mask) \
56611 ({ \
56612 - int node; \
56613 - for_each_node_mask(node, (mask)) \
56614 - if (node_online(node)) \
56615 + int __node; \
56616 + for_each_node_mask(__node, (mask)) \
56617 + if (node_online(__node)) \
56618 break; \
56619 - node; \
56620 + __node; \
56621 })
56622
56623 #define num_online_nodes() num_node_state(N_ONLINE)
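
The any_online_node() change above, like the earlier ptep_clear_flush_notify() one, only renames the macro-local temporary. Without the rename, an argument that happens to use the same identifier gets shadowed inside the statement expression and the macro reads the freshly declared, uninitialized variable instead of the caller's. A minimal illustration with a hypothetical macro (relies on the GCC/Clang statement-expression extension):

#include <stdio.h>

/* Shadow-prone version: the temporary uses a name a caller might also use. */
#define BAD_DOUBLE(x)  ({ int __tmp = (x); __tmp * 2; })
/* Hygienic version: a more obscure local name avoids the collision. */
#define GOOD_DOUBLE(x) ({ int ___tmp = (x); ___tmp * 2; })

int main(void)
{
        int __tmp = 21;

        /* BAD_DOUBLE(__tmp) would expand to ({ int __tmp = (__tmp); ... }) where the
         * initializer reads the new, uninitialized __tmp instead of the caller's 21. */
        printf("%d\n", GOOD_DOUBLE(__tmp));     /* prints 42 */
        return 0;
}
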
56624 diff -urNp linux-2.6.32.42/include/linux/oprofile.h linux-2.6.32.42/include/linux/oprofile.h
56625 --- linux-2.6.32.42/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
56626 +++ linux-2.6.32.42/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
56627 @@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
56628 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
56629 char const * name, ulong * val);
56630
56631 -/** Create a file for read-only access to an atomic_t. */
56632 +/** Create a file for read-only access to an atomic_unchecked_t. */
56633 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
56634 - char const * name, atomic_t * val);
56635 + char const * name, atomic_unchecked_t * val);
56636
56637 /** create a directory */
56638 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
56639 diff -urNp linux-2.6.32.42/include/linux/perf_event.h linux-2.6.32.42/include/linux/perf_event.h
56640 --- linux-2.6.32.42/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
56641 +++ linux-2.6.32.42/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
56642 @@ -476,7 +476,7 @@ struct hw_perf_event {
56643 struct hrtimer hrtimer;
56644 };
56645 };
56646 - atomic64_t prev_count;
56647 + atomic64_unchecked_t prev_count;
56648 u64 sample_period;
56649 u64 last_period;
56650 atomic64_t period_left;
56651 @@ -557,7 +557,7 @@ struct perf_event {
56652 const struct pmu *pmu;
56653
56654 enum perf_event_active_state state;
56655 - atomic64_t count;
56656 + atomic64_unchecked_t count;
56657
56658 /*
56659 * These are the total time in nanoseconds that the event
56660 @@ -595,8 +595,8 @@ struct perf_event {
56661 * These accumulate total time (in nanoseconds) that children
56662 * events have been enabled and running, respectively.
56663 */
56664 - atomic64_t child_total_time_enabled;
56665 - atomic64_t child_total_time_running;
56666 + atomic64_unchecked_t child_total_time_enabled;
56667 + atomic64_unchecked_t child_total_time_running;
56668
56669 /*
56670 * Protect attach/detach and child_list:
56671 diff -urNp linux-2.6.32.42/include/linux/pipe_fs_i.h linux-2.6.32.42/include/linux/pipe_fs_i.h
56672 --- linux-2.6.32.42/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
56673 +++ linux-2.6.32.42/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
56674 @@ -46,9 +46,9 @@ struct pipe_inode_info {
56675 wait_queue_head_t wait;
56676 unsigned int nrbufs, curbuf;
56677 struct page *tmp_page;
56678 - unsigned int readers;
56679 - unsigned int writers;
56680 - unsigned int waiting_writers;
56681 + atomic_t readers;
56682 + atomic_t writers;
56683 + atomic_t waiting_writers;
56684 unsigned int r_counter;
56685 unsigned int w_counter;
56686 struct fasync_struct *fasync_readers;
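
Turning the pipe readers/writers counters into atomic_t keeps concurrent open and release paths from losing increments. A rough userspace analogue using C11 atomics (illustrative only, not the kernel's atomic_t API):

#include <stdatomic.h>
#include <stdio.h>

struct pipe_counts {
        atomic_int readers;
        atomic_int writers;
};

static void reader_open(struct pipe_counts *p)  { atomic_fetch_add(&p->readers, 1); }
static void reader_close(struct pipe_counts *p) { atomic_fetch_sub(&p->readers, 1); }

int main(void)
{
        struct pipe_counts pc;

        atomic_init(&pc.readers, 0);
        atomic_init(&pc.writers, 0);

        reader_open(&pc);       /* e.g. two opens racing ... */
        reader_open(&pc);
        reader_close(&pc);      /* ... and one release */
        printf("readers: %d\n", atomic_load(&pc.readers));     /* 1 */
        return 0;
}
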
56687 diff -urNp linux-2.6.32.42/include/linux/poison.h linux-2.6.32.42/include/linux/poison.h
56688 --- linux-2.6.32.42/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
56689 +++ linux-2.6.32.42/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
56690 @@ -19,8 +19,8 @@
56691 * under normal circumstances, used to verify that nobody uses
56692 * non-initialized list entries.
56693 */
56694 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
56695 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
56696 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
56697 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
56698
56699 /********** include/linux/timer.h **********/
56700 /*
56701 diff -urNp linux-2.6.32.42/include/linux/proc_fs.h linux-2.6.32.42/include/linux/proc_fs.h
56702 --- linux-2.6.32.42/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
56703 +++ linux-2.6.32.42/include/linux/proc_fs.h 2011-04-17 15:56:46.000000000 -0400
56704 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
56705 return proc_create_data(name, mode, parent, proc_fops, NULL);
56706 }
56707
56708 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
56709 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
56710 +{
56711 +#ifdef CONFIG_GRKERNSEC_PROC_USER
56712 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
56713 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56714 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
56715 +#else
56716 + return proc_create_data(name, mode, parent, proc_fops, NULL);
56717 +#endif
56718 +}
56719 +
56720 +
56721 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
56722 mode_t mode, struct proc_dir_entry *base,
56723 read_proc_t *read_proc, void * data)
56724 diff -urNp linux-2.6.32.42/include/linux/ptrace.h linux-2.6.32.42/include/linux/ptrace.h
56725 --- linux-2.6.32.42/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
56726 +++ linux-2.6.32.42/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
56727 @@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
56728 extern void exit_ptrace(struct task_struct *tracer);
56729 #define PTRACE_MODE_READ 1
56730 #define PTRACE_MODE_ATTACH 2
56731 -/* Returns 0 on success, -errno on denial. */
56732 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
56733 /* Returns true on success, false on denial. */
56734 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
56735 +/* Returns true on success, false on denial. */
56736 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
56737
56738 static inline int ptrace_reparented(struct task_struct *child)
56739 {
56740 diff -urNp linux-2.6.32.42/include/linux/random.h linux-2.6.32.42/include/linux/random.h
56741 --- linux-2.6.32.42/include/linux/random.h 2011-03-27 14:31:47.000000000 -0400
56742 +++ linux-2.6.32.42/include/linux/random.h 2011-04-17 15:56:46.000000000 -0400
56743 @@ -74,6 +74,11 @@ unsigned long randomize_range(unsigned l
56744 u32 random32(void);
56745 void srandom32(u32 seed);
56746
56747 +static inline unsigned long pax_get_random_long(void)
56748 +{
56749 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
56750 +}
56751 +
56752 #endif /* __KERNEL___ */
56753
56754 #endif /* _LINUX_RANDOM_H */
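
pax_get_random_long() widens random32() to a full long: on 64-bit kernels a second 32-bit draw fills the upper half, while on 32-bit kernels the extra term vanishes. A standalone sketch of the same widening with a stand-in 32-bit generator; the two 16-bit shifts are my own choice to avoid an out-of-range shift when long is only 32 bits wide:

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel's random32(): any 32-bit pseudo-random source. */
static unsigned int rand32(void)
{
        return ((unsigned int)rand() << 16) ^ (unsigned int)rand();
}

static unsigned long get_random_long(void)
{
        unsigned long val = rand32();

        /* On 64-bit longs a second draw fills the upper half; on 32-bit longs
         * the shifted term drops out and the low word alone is returned. */
        if (sizeof(long) > 4)
                val += ((unsigned long)rand32() << 16) << 16;
        return val;
}

int main(void)
{
        srand(1);
        printf("%#lx\n", get_random_long());
        return 0;
}
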
56755 diff -urNp linux-2.6.32.42/include/linux/reboot.h linux-2.6.32.42/include/linux/reboot.h
56756 --- linux-2.6.32.42/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
56757 +++ linux-2.6.32.42/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
56758 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
56759 * Architecture-specific implementations of sys_reboot commands.
56760 */
56761
56762 -extern void machine_restart(char *cmd);
56763 -extern void machine_halt(void);
56764 -extern void machine_power_off(void);
56765 +extern void machine_restart(char *cmd) __noreturn;
56766 +extern void machine_halt(void) __noreturn;
56767 +extern void machine_power_off(void) __noreturn;
56768
56769 extern void machine_shutdown(void);
56770 struct pt_regs;
56771 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
56772 */
56773
56774 extern void kernel_restart_prepare(char *cmd);
56775 -extern void kernel_restart(char *cmd);
56776 -extern void kernel_halt(void);
56777 -extern void kernel_power_off(void);
56778 +extern void kernel_restart(char *cmd) __noreturn;
56779 +extern void kernel_halt(void) __noreturn;
56780 +extern void kernel_power_off(void) __noreturn;
56781
56782 void ctrl_alt_del(void);
56783
56784 @@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
56785 * Emergency restart, callable from an interrupt handler.
56786 */
56787
56788 -extern void emergency_restart(void);
56789 +extern void emergency_restart(void) __noreturn;
56790 #include <asm/emergency-restart.h>
56791
56792 #endif
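
The __noreturn annotations on machine_halt(), kernel_power_off() and friends let the compiler treat everything after such a call as unreachable and check that the functions really never return. A tiny sketch of the underlying attribute with a made-up fatal() helper:

#include <stdio.h>
#include <stdlib.h>

/* __noreturn in the kernel expands to an attribute like this one. */
static void fatal(const char *msg) __attribute__((noreturn));

static void fatal(const char *msg)
{
        fprintf(stderr, "fatal: %s\n", msg);
        exit(1);        /* must not return; the attribute makes that a checked promise */
}

int main(int argc, char **argv)
{
        if (argc < 2)
                fatal("missing argument");
        /* Only reached when fatal() was not called on the branch above. */
        printf("arg: %s\n", argv[1]);
        return 0;
}
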
56793 diff -urNp linux-2.6.32.42/include/linux/reiserfs_fs.h linux-2.6.32.42/include/linux/reiserfs_fs.h
56794 --- linux-2.6.32.42/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
56795 +++ linux-2.6.32.42/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
56796 @@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
56797 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
56798
56799 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
56800 -#define get_generation(s) atomic_read (&fs_generation(s))
56801 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
56802 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
56803 #define __fs_changed(gen,s) (gen != get_generation (s))
56804 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
56805 @@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
56806 */
56807
56808 struct item_operations {
56809 - int (*bytes_number) (struct item_head * ih, int block_size);
56810 - void (*decrement_key) (struct cpu_key *);
56811 - int (*is_left_mergeable) (struct reiserfs_key * ih,
56812 + int (* const bytes_number) (struct item_head * ih, int block_size);
56813 + void (* const decrement_key) (struct cpu_key *);
56814 + int (* const is_left_mergeable) (struct reiserfs_key * ih,
56815 unsigned long bsize);
56816 - void (*print_item) (struct item_head *, char *item);
56817 - void (*check_item) (struct item_head *, char *item);
56818 + void (* const print_item) (struct item_head *, char *item);
56819 + void (* const check_item) (struct item_head *, char *item);
56820
56821 - int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
56822 + int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
56823 int is_affected, int insert_size);
56824 - int (*check_left) (struct virtual_item * vi, int free,
56825 + int (* const check_left) (struct virtual_item * vi, int free,
56826 int start_skip, int end_skip);
56827 - int (*check_right) (struct virtual_item * vi, int free);
56828 - int (*part_size) (struct virtual_item * vi, int from, int to);
56829 - int (*unit_num) (struct virtual_item * vi);
56830 - void (*print_vi) (struct virtual_item * vi);
56831 + int (* const check_right) (struct virtual_item * vi, int free);
56832 + int (* const part_size) (struct virtual_item * vi, int from, int to);
56833 + int (* const unit_num) (struct virtual_item * vi);
56834 + void (* const print_vi) (struct virtual_item * vi);
56835 };
56836
56837 -extern struct item_operations *item_ops[TYPE_ANY + 1];
56838 +extern const struct item_operations * const item_ops[TYPE_ANY + 1];
56839
56840 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
56841 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
56842 diff -urNp linux-2.6.32.42/include/linux/reiserfs_fs_sb.h linux-2.6.32.42/include/linux/reiserfs_fs_sb.h
56843 --- linux-2.6.32.42/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
56844 +++ linux-2.6.32.42/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
56845 @@ -377,7 +377,7 @@ struct reiserfs_sb_info {
56846 /* Comment? -Hans */
56847 wait_queue_head_t s_wait;
56848 /* To be obsoleted soon by per buffer seals.. -Hans */
56849 - atomic_t s_generation_counter; // increased by one every time the
56850 + atomic_unchecked_t s_generation_counter; // increased by one every time the
56851 // tree gets re-balanced
56852 unsigned long s_properties; /* File system properties. Currently holds
56853 on-disk FS format */
56854 diff -urNp linux-2.6.32.42/include/linux/sched.h linux-2.6.32.42/include/linux/sched.h
56855 --- linux-2.6.32.42/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
56856 +++ linux-2.6.32.42/include/linux/sched.h 2011-06-04 20:42:54.000000000 -0400
56857 @@ -101,6 +101,7 @@ struct bio;
56858 struct fs_struct;
56859 struct bts_context;
56860 struct perf_event_context;
56861 +struct linux_binprm;
56862
56863 /*
56864 * List of flags we want to share for kernel threads,
56865 @@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
56866 extern signed long schedule_timeout_uninterruptible(signed long timeout);
56867 asmlinkage void __schedule(void);
56868 asmlinkage void schedule(void);
56869 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
56870 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
56871
56872 struct nsproxy;
56873 struct user_namespace;
56874 @@ -371,9 +372,12 @@ struct user_namespace;
56875 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
56876
56877 extern int sysctl_max_map_count;
56878 +extern unsigned long sysctl_heap_stack_gap;
56879
56880 #include <linux/aio.h>
56881
56882 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
56883 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
56884 extern unsigned long
56885 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
56886 unsigned long, unsigned long);
56887 @@ -666,6 +670,16 @@ struct signal_struct {
56888 struct tty_audit_buf *tty_audit_buf;
56889 #endif
56890
56891 +#ifdef CONFIG_GRKERNSEC
56892 + u32 curr_ip;
56893 + u32 saved_ip;
56894 + u32 gr_saddr;
56895 + u32 gr_daddr;
56896 + u16 gr_sport;
56897 + u16 gr_dport;
56898 + u8 used_accept:1;
56899 +#endif
56900 +
56901 int oom_adj; /* OOM kill score adjustment (bit shift) */
56902 };
56903
56904 @@ -723,6 +737,11 @@ struct user_struct {
56905 struct key *session_keyring; /* UID's default session keyring */
56906 #endif
56907
56908 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
56909 + unsigned int banned;
56910 + unsigned long ban_expires;
56911 +#endif
56912 +
56913 /* Hash table maintenance information */
56914 struct hlist_node uidhash_node;
56915 uid_t uid;
56916 @@ -1328,8 +1347,8 @@ struct task_struct {
56917 struct list_head thread_group;
56918
56919 struct completion *vfork_done; /* for vfork() */
56920 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
56921 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56922 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
56923 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
56924
56925 cputime_t utime, stime, utimescaled, stimescaled;
56926 cputime_t gtime;
56927 @@ -1343,16 +1362,6 @@ struct task_struct {
56928 struct task_cputime cputime_expires;
56929 struct list_head cpu_timers[3];
56930
56931 -/* process credentials */
56932 - const struct cred *real_cred; /* objective and real subjective task
56933 - * credentials (COW) */
56934 - const struct cred *cred; /* effective (overridable) subjective task
56935 - * credentials (COW) */
56936 - struct mutex cred_guard_mutex; /* guard against foreign influences on
56937 - * credential calculations
56938 - * (notably. ptrace) */
56939 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56940 -
56941 char comm[TASK_COMM_LEN]; /* executable name excluding path
56942 - access with [gs]et_task_comm (which lock
56943 it with task_lock())
56944 @@ -1369,6 +1378,10 @@ struct task_struct {
56945 #endif
56946 /* CPU-specific state of this task */
56947 struct thread_struct thread;
56948 +/* thread_info moved to task_struct */
56949 +#ifdef CONFIG_X86
56950 + struct thread_info tinfo;
56951 +#endif
56952 /* filesystem information */
56953 struct fs_struct *fs;
56954 /* open file information */
56955 @@ -1436,6 +1449,15 @@ struct task_struct {
56956 int hardirq_context;
56957 int softirq_context;
56958 #endif
56959 +
56960 +/* process credentials */
56961 + const struct cred *real_cred; /* objective and real subjective task
56962 + * credentials (COW) */
56963 + struct mutex cred_guard_mutex; /* guard against foreign influences on
56964 + * credential calculations
56965 + * (notably. ptrace) */
56966 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
56967 +
56968 #ifdef CONFIG_LOCKDEP
56969 # define MAX_LOCK_DEPTH 48UL
56970 u64 curr_chain_key;
56971 @@ -1456,6 +1478,9 @@ struct task_struct {
56972
56973 struct backing_dev_info *backing_dev_info;
56974
56975 + const struct cred *cred; /* effective (overridable) subjective task
56976 + * credentials (COW) */
56977 +
56978 struct io_context *io_context;
56979
56980 unsigned long ptrace_message;
56981 @@ -1519,6 +1544,21 @@ struct task_struct {
56982 unsigned long default_timer_slack_ns;
56983
56984 struct list_head *scm_work_list;
56985 +
56986 +#ifdef CONFIG_GRKERNSEC
56987 + /* grsecurity */
56988 + struct dentry *gr_chroot_dentry;
56989 + struct acl_subject_label *acl;
56990 + struct acl_role_label *role;
56991 + struct file *exec_file;
56992 + u16 acl_role_id;
56993 + /* is this the task that authenticated to the special role */
56994 + u8 acl_sp_role;
56995 + u8 is_writable;
56996 + u8 brute;
56997 + u8 gr_is_chrooted;
56998 +#endif
56999 +
57000 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
57001 /* Index of current stored adress in ret_stack */
57002 int curr_ret_stack;
57003 @@ -1542,6 +1582,57 @@ struct task_struct {
57004 #endif /* CONFIG_TRACING */
57005 };
57006
57007 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
57008 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
57009 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
57010 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
57011 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
57012 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
57013 +
57014 +#ifdef CONFIG_PAX_SOFTMODE
57015 +extern unsigned int pax_softmode;
57016 +#endif
57017 +
57018 +extern int pax_check_flags(unsigned long *);
57019 +
57020 +/* if tsk != current then task_lock must be held on it */
57021 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57022 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
57023 +{
57024 + if (likely(tsk->mm))
57025 + return tsk->mm->pax_flags;
57026 + else
57027 + return 0UL;
57028 +}
57029 +
57030 +/* if tsk != current then task_lock must be held on it */
57031 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
57032 +{
57033 + if (likely(tsk->mm)) {
57034 + tsk->mm->pax_flags = flags;
57035 + return 0;
57036 + }
57037 + return -EINVAL;
57038 +}
57039 +#endif
57040 +
57041 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57042 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
57043 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
57044 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
57045 +#endif
57046 +
57047 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
57048 +void pax_report_insns(void *pc, void *sp);
57049 +void pax_report_refcount_overflow(struct pt_regs *regs);
57050 +void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type);
57051 +
57052 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
57053 +extern void pax_track_stack(void);
57054 +#else
57055 +static inline void pax_track_stack(void) {}
57056 +#endif
57057 +
57058 /* Future-safe accessor for struct task_struct's cpus_allowed. */
57059 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
57060
57061 @@ -1978,7 +2069,9 @@ void yield(void);
57062 extern struct exec_domain default_exec_domain;
57063
57064 union thread_union {
57065 +#ifndef CONFIG_X86
57066 struct thread_info thread_info;
57067 +#endif
57068 unsigned long stack[THREAD_SIZE/sizeof(long)];
57069 };
57070
57071 @@ -2155,7 +2248,7 @@ extern void __cleanup_sighand(struct sig
57072 extern void exit_itimers(struct signal_struct *);
57073 extern void flush_itimer_signals(void);
57074
57075 -extern NORET_TYPE void do_group_exit(int);
57076 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
57077
57078 extern void daemonize(const char *, ...);
57079 extern int allow_signal(int);
57080 @@ -2284,13 +2377,17 @@ static inline unsigned long *end_of_stac
57081
57082 #endif
57083
57084 -static inline int object_is_on_stack(void *obj)
57085 +static inline int object_starts_on_stack(void *obj)
57086 {
57087 - void *stack = task_stack_page(current);
57088 + const void *stack = task_stack_page(current);
57089
57090 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
57091 }
57092
57093 +#ifdef CONFIG_PAX_USERCOPY
57094 +extern int object_is_on_stack(const void *obj, unsigned long len);
57095 +#endif
57096 +
57097 extern void thread_info_cache_init(void);
57098
57099 #ifdef CONFIG_DEBUG_STACK_USAGE
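The sched.h hunk above renames object_is_on_stack() to object_starts_on_stack() and, under CONFIG_PAX_USERCOPY, declares a separate length-aware object_is_on_stack(obj, len) that is implemented elsewhere in the patch. A minimal user-space sketch of the two kinds of check; the stack base, the size and the length-aware helper's exact policy are stand-ins, not taken from the patch:

/* sketch: "starts on the stack" vs. "fits entirely on the stack";
 * the static array stands in for task_stack_page()/THREAD_SIZE */
#include <stdbool.h>
#include <stdio.h>

#define STACK_SIZE 8192UL
static unsigned char stack[STACK_SIZE];

static bool object_starts_on_stack(const void *obj)
{
	const unsigned char *base = stack;
	const unsigned char *p = obj;

	return p >= base && p < base + STACK_SIZE;
}

/* illustrative guess at what a length-aware check has to verify */
static bool object_fits_on_stack(const void *obj, unsigned long len)
{
	const unsigned char *base = stack;
	const unsigned char *p = obj;

	return p >= base && len <= STACK_SIZE && p + len <= base + STACK_SIZE;
}

int main(void)
{
	unsigned char *p = stack + STACK_SIZE - 16;

	printf("starts: %d, fits 8 bytes: %d, fits 64 bytes: %d\n",
	       object_starts_on_stack(p),
	       object_fits_on_stack(p, 8),
	       object_fits_on_stack(p, 64));
	return 0;
}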
57100 diff -urNp linux-2.6.32.42/include/linux/screen_info.h linux-2.6.32.42/include/linux/screen_info.h
57101 --- linux-2.6.32.42/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
57102 +++ linux-2.6.32.42/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
57103 @@ -42,7 +42,8 @@ struct screen_info {
57104 __u16 pages; /* 0x32 */
57105 __u16 vesa_attributes; /* 0x34 */
57106 __u32 capabilities; /* 0x36 */
57107 - __u8 _reserved[6]; /* 0x3a */
57108 + __u16 vesapm_size; /* 0x3a */
57109 + __u8 _reserved[4]; /* 0x3c */
57110 } __attribute__((packed));
57111
57112 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
57113 diff -urNp linux-2.6.32.42/include/linux/security.h linux-2.6.32.42/include/linux/security.h
57114 --- linux-2.6.32.42/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
57115 +++ linux-2.6.32.42/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
57116 @@ -34,6 +34,7 @@
57117 #include <linux/key.h>
57118 #include <linux/xfrm.h>
57119 #include <linux/gfp.h>
57120 +#include <linux/grsecurity.h>
57121 #include <net/flow.h>
57122
57123 /* Maximum number of letters for an LSM name string */
57124 diff -urNp linux-2.6.32.42/include/linux/shm.h linux-2.6.32.42/include/linux/shm.h
57125 --- linux-2.6.32.42/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
57126 +++ linux-2.6.32.42/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
57127 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
57128 pid_t shm_cprid;
57129 pid_t shm_lprid;
57130 struct user_struct *mlock_user;
57131 +#ifdef CONFIG_GRKERNSEC
57132 + time_t shm_createtime;
57133 + pid_t shm_lapid;
57134 +#endif
57135 };
57136
57137 /* shm_mode upper byte flags */
57138 diff -urNp linux-2.6.32.42/include/linux/skbuff.h linux-2.6.32.42/include/linux/skbuff.h
57139 --- linux-2.6.32.42/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
57140 +++ linux-2.6.32.42/include/linux/skbuff.h 2011-05-04 17:56:20.000000000 -0400
57141 @@ -544,7 +544,7 @@ static inline union skb_shared_tx *skb_t
57142 */
57143 static inline int skb_queue_empty(const struct sk_buff_head *list)
57144 {
57145 - return list->next == (struct sk_buff *)list;
57146 + return list->next == (const struct sk_buff *)list;
57147 }
57148
57149 /**
57150 @@ -557,7 +557,7 @@ static inline int skb_queue_empty(const
57151 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
57152 const struct sk_buff *skb)
57153 {
57154 - return (skb->next == (struct sk_buff *) list);
57155 + return (skb->next == (const struct sk_buff *) list);
57156 }
57157
57158 /**
57159 @@ -570,7 +570,7 @@ static inline bool skb_queue_is_last(con
57160 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
57161 const struct sk_buff *skb)
57162 {
57163 - return (skb->prev == (struct sk_buff *) list);
57164 + return (skb->prev == (const struct sk_buff *) list);
57165 }
57166
57167 /**
57168 @@ -1367,7 +1367,7 @@ static inline int skb_network_offset(con
57169 * headroom, you should not reduce this.
57170 */
57171 #ifndef NET_SKB_PAD
57172 -#define NET_SKB_PAD 32
57173 +#define NET_SKB_PAD (_AC(32,U))
57174 #endif
57175
57176 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
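Spelling NET_SKB_PAD as _AC(32,U) keeps the constant a bare 32 for assembler consumers while giving C code an unsigned literal, avoiding signed/unsigned mixing when the padding is compared with or added to unsigned lengths. A sketch along the lines of the _AC() helper from include/linux/const.h; the exact definition is reproduced from memory and should be treated as an assumption:

/* sketch: suffix-pasting constant helper in the style of linux/const.h */
#include <stdio.h>

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: no C suffixes */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C: 32, U paste to 32U (unsigned) */
#endif

#define NET_SKB_PAD	_AC(32, U)

int main(void)
{
	printf("%u\n", NET_SKB_PAD);	/* prints 32, as an unsigned value */
	return 0;
}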
57177 diff -urNp linux-2.6.32.42/include/linux/slab_def.h linux-2.6.32.42/include/linux/slab_def.h
57178 --- linux-2.6.32.42/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
57179 +++ linux-2.6.32.42/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
57180 @@ -69,10 +69,10 @@ struct kmem_cache {
57181 unsigned long node_allocs;
57182 unsigned long node_frees;
57183 unsigned long node_overflow;
57184 - atomic_t allochit;
57185 - atomic_t allocmiss;
57186 - atomic_t freehit;
57187 - atomic_t freemiss;
57188 + atomic_unchecked_t allochit;
57189 + atomic_unchecked_t allocmiss;
57190 + atomic_unchecked_t freehit;
57191 + atomic_unchecked_t freemiss;
57192
57193 /*
57194 * If debugging is enabled, then the allocator can add additional
57195 diff -urNp linux-2.6.32.42/include/linux/slab.h linux-2.6.32.42/include/linux/slab.h
57196 --- linux-2.6.32.42/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
57197 +++ linux-2.6.32.42/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
57198 @@ -11,12 +11,20 @@
57199
57200 #include <linux/gfp.h>
57201 #include <linux/types.h>
57202 +#include <linux/err.h>
57203
57204 /*
57205 * Flags to pass to kmem_cache_create().
57206 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
57207 */
57208 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
57209 +
57210 +#ifdef CONFIG_PAX_USERCOPY
57211 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
57212 +#else
57213 +#define SLAB_USERCOPY 0x00000000UL
57214 +#endif
57215 +
57216 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
57217 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
57218 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
57219 @@ -82,10 +90,13 @@
57220 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
57221 * Both make kfree a no-op.
57222 */
57223 -#define ZERO_SIZE_PTR ((void *)16)
57224 +#define ZERO_SIZE_PTR \
57225 +({ \
57226 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
57227 + (void *)(-MAX_ERRNO-1L); \
57228 +})
57229
57230 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
57231 - (unsigned long)ZERO_SIZE_PTR)
57232 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
57233
57234 /*
57235 * struct kmem_cache related prototypes
57236 @@ -138,6 +149,7 @@ void * __must_check krealloc(const void
57237 void kfree(const void *);
57238 void kzfree(const void *);
57239 size_t ksize(const void *);
57240 +void check_object_size(const void *ptr, unsigned long n, bool to);
57241
57242 /*
57243 * Allocator specific definitions. These are mainly used to establish optimized
57244 @@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
57245
57246 void __init kmem_cache_init_late(void);
57247
57248 +#define kmalloc(x, y) \
57249 +({ \
57250 + void *___retval; \
57251 + intoverflow_t ___x = (intoverflow_t)x; \
57252 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
57253 + ___retval = NULL; \
57254 + else \
57255 + ___retval = kmalloc((size_t)___x, (y)); \
57256 + ___retval; \
57257 +})
57258 +
57259 +#define kmalloc_node(x, y, z) \
57260 +({ \
57261 + void *___retval; \
57262 + intoverflow_t ___x = (intoverflow_t)x; \
57263 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
57264 + ___retval = NULL; \
57265 + else \
57266 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
57267 + ___retval; \
57268 +})
57269 +
57270 +#define kzalloc(x, y) \
57271 +({ \
57272 + void *___retval; \
57273 + intoverflow_t ___x = (intoverflow_t)x; \
57274 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
57275 + ___retval = NULL; \
57276 + else \
57277 + ___retval = kzalloc((size_t)___x, (y)); \
57278 + ___retval; \
57279 +})
57280 +
57281 #endif /* _LINUX_SLAB_H */
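The kmalloc()/kmalloc_node()/kzalloc() wrappers in the slab.h hunk above lean on two C details: a GNU statement expression lets a macro yield a value, and the preprocessor never re-expands a macro name inside its own expansion, so the inner kmalloc() call binds to the real function instead of recursing. A user-space sketch of the same overflow-guard pattern around malloc(); the wrapper body and the stand-in intoverflow_t are illustrative, not the patch's exact code:

/* sketch: the size expression is widened first and rejected if it no
 * longer fits in what the real allocator can take; the inner malloc()
 * is the real function because macros are not expanded recursively */
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

typedef unsigned long long intoverflow_t;

#define malloc(x)						\
({								\
	void *___retval;					\
	intoverflow_t ___x = (intoverflow_t)(x);		\
	if (___x > ULONG_MAX) {					\
		fprintf(stderr, "malloc size overflow\n");	\
		___retval = NULL;				\
	} else							\
		___retval = malloc((size_t)___x);		\
	___retval;						\
})

int main(void)
{
	void *p = malloc(64);	/* goes through the guard */

	free(p);
	return 0;
}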
57282 diff -urNp linux-2.6.32.42/include/linux/slub_def.h linux-2.6.32.42/include/linux/slub_def.h
57283 --- linux-2.6.32.42/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
57284 +++ linux-2.6.32.42/include/linux/slub_def.h 2011-04-17 15:56:46.000000000 -0400
57285 @@ -86,7 +86,7 @@ struct kmem_cache {
57286 struct kmem_cache_order_objects max;
57287 struct kmem_cache_order_objects min;
57288 gfp_t allocflags; /* gfp flags to use on each alloc */
57289 - int refcount; /* Refcount for slab cache destroy */
57290 + atomic_t refcount; /* Refcount for slab cache destroy */
57291 void (*ctor)(void *);
57292 int inuse; /* Offset to metadata */
57293 int align; /* Alignment */
57294 diff -urNp linux-2.6.32.42/include/linux/sonet.h linux-2.6.32.42/include/linux/sonet.h
57295 --- linux-2.6.32.42/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
57296 +++ linux-2.6.32.42/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
57297 @@ -61,7 +61,7 @@ struct sonet_stats {
57298 #include <asm/atomic.h>
57299
57300 struct k_sonet_stats {
57301 -#define __HANDLE_ITEM(i) atomic_t i
57302 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
57303 __SONET_ITEMS
57304 #undef __HANDLE_ITEM
57305 };
57306 diff -urNp linux-2.6.32.42/include/linux/sunrpc/clnt.h linux-2.6.32.42/include/linux/sunrpc/clnt.h
57307 --- linux-2.6.32.42/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
57308 +++ linux-2.6.32.42/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
57309 @@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
57310 {
57311 switch (sap->sa_family) {
57312 case AF_INET:
57313 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
57314 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
57315 case AF_INET6:
57316 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
57317 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
57318 }
57319 return 0;
57320 }
57321 @@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
57322 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
57323 const struct sockaddr *src)
57324 {
57325 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
57326 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
57327 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
57328
57329 dsin->sin_family = ssin->sin_family;
57330 @@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
57331 if (sa->sa_family != AF_INET6)
57332 return 0;
57333
57334 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
57335 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
57336 }
57337
57338 #endif /* __KERNEL__ */
57339 diff -urNp linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h
57340 --- linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
57341 +++ linux-2.6.32.42/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
57342 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
57343 extern unsigned int svcrdma_max_requests;
57344 extern unsigned int svcrdma_max_req_size;
57345
57346 -extern atomic_t rdma_stat_recv;
57347 -extern atomic_t rdma_stat_read;
57348 -extern atomic_t rdma_stat_write;
57349 -extern atomic_t rdma_stat_sq_starve;
57350 -extern atomic_t rdma_stat_rq_starve;
57351 -extern atomic_t rdma_stat_rq_poll;
57352 -extern atomic_t rdma_stat_rq_prod;
57353 -extern atomic_t rdma_stat_sq_poll;
57354 -extern atomic_t rdma_stat_sq_prod;
57355 +extern atomic_unchecked_t rdma_stat_recv;
57356 +extern atomic_unchecked_t rdma_stat_read;
57357 +extern atomic_unchecked_t rdma_stat_write;
57358 +extern atomic_unchecked_t rdma_stat_sq_starve;
57359 +extern atomic_unchecked_t rdma_stat_rq_starve;
57360 +extern atomic_unchecked_t rdma_stat_rq_poll;
57361 +extern atomic_unchecked_t rdma_stat_rq_prod;
57362 +extern atomic_unchecked_t rdma_stat_sq_poll;
57363 +extern atomic_unchecked_t rdma_stat_sq_prod;
57364
57365 #define RPCRDMA_VERSION 1
57366
57367 diff -urNp linux-2.6.32.42/include/linux/suspend.h linux-2.6.32.42/include/linux/suspend.h
57368 --- linux-2.6.32.42/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
57369 +++ linux-2.6.32.42/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
57370 @@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
57371 * which require special recovery actions in that situation.
57372 */
57373 struct platform_suspend_ops {
57374 - int (*valid)(suspend_state_t state);
57375 - int (*begin)(suspend_state_t state);
57376 - int (*prepare)(void);
57377 - int (*prepare_late)(void);
57378 - int (*enter)(suspend_state_t state);
57379 - void (*wake)(void);
57380 - void (*finish)(void);
57381 - void (*end)(void);
57382 - void (*recover)(void);
57383 + int (* const valid)(suspend_state_t state);
57384 + int (* const begin)(suspend_state_t state);
57385 + int (* const prepare)(void);
57386 + int (* const prepare_late)(void);
57387 + int (* const enter)(suspend_state_t state);
57388 + void (* const wake)(void);
57389 + void (* const finish)(void);
57390 + void (* const end)(void);
57391 + void (* const recover)(void);
57392 };
57393
57394 #ifdef CONFIG_SUSPEND
57395 @@ -120,7 +120,7 @@ struct platform_suspend_ops {
57396 * suspend_set_ops - set platform dependent suspend operations
57397 * @ops: The new suspend operations to set.
57398 */
57399 -extern void suspend_set_ops(struct platform_suspend_ops *ops);
57400 +extern void suspend_set_ops(const struct platform_suspend_ops *ops);
57401 extern int suspend_valid_only_mem(suspend_state_t state);
57402
57403 /**
57404 @@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
57405 #else /* !CONFIG_SUSPEND */
57406 #define suspend_valid_only_mem NULL
57407
57408 -static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
57409 +static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
57410 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
57411 #endif /* !CONFIG_SUSPEND */
57412
57413 @@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
57414 * platforms which require special recovery actions in that situation.
57415 */
57416 struct platform_hibernation_ops {
57417 - int (*begin)(void);
57418 - void (*end)(void);
57419 - int (*pre_snapshot)(void);
57420 - void (*finish)(void);
57421 - int (*prepare)(void);
57422 - int (*enter)(void);
57423 - void (*leave)(void);
57424 - int (*pre_restore)(void);
57425 - void (*restore_cleanup)(void);
57426 - void (*recover)(void);
57427 + int (* const begin)(void);
57428 + void (* const end)(void);
57429 + int (* const pre_snapshot)(void);
57430 + void (* const finish)(void);
57431 + int (* const prepare)(void);
57432 + int (* const enter)(void);
57433 + void (* const leave)(void);
57434 + int (* const pre_restore)(void);
57435 + void (* const restore_cleanup)(void);
57436 + void (* const recover)(void);
57437 };
57438
57439 #ifdef CONFIG_HIBERNATION
57440 @@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
57441 extern void swsusp_unset_page_free(struct page *);
57442 extern unsigned long get_safe_page(gfp_t gfp_mask);
57443
57444 -extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
57445 +extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
57446 extern int hibernate(void);
57447 extern bool system_entering_hibernation(void);
57448 #else /* CONFIG_HIBERNATION */
57449 @@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
57450 static inline void swsusp_set_page_free(struct page *p) {}
57451 static inline void swsusp_unset_page_free(struct page *p) {}
57452
57453 -static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
57454 +static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
57455 static inline int hibernate(void) { return -ENOSYS; }
57456 static inline bool system_entering_hibernation(void) { return false; }
57457 #endif /* CONFIG_HIBERNATION */
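Const-qualifying the members of platform_suspend_ops and platform_hibernation_ops means an ops table can only be filled in at definition time, which is what lets such a table itself be declared const and kept in read-only data. A small sketch of the pattern, independent of the kernel structures above:

/* sketch: an ops table whose function pointers are const-qualified,
 * so the table is defined once and can never be rewritten at run time */
#include <stdio.h>

struct example_ops {
	int  (* const prepare)(void);
	void (* const finish)(void);
};

static int example_prepare(void) { puts("prepare"); return 0; }
static void example_finish(void) { puts("finish"); }

/* the object is const too, so it can be placed in .rodata */
static const struct example_ops ops = {
	.prepare = example_prepare,
	.finish  = example_finish,
};

int main(void)
{
	if (ops.prepare() == 0)
		ops.finish();
	/* ops.prepare = NULL; would not compile: the member is const */
	return 0;
}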
57458 diff -urNp linux-2.6.32.42/include/linux/sysctl.h linux-2.6.32.42/include/linux/sysctl.h
57459 --- linux-2.6.32.42/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
57460 +++ linux-2.6.32.42/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
57461 @@ -164,7 +164,11 @@ enum
57462 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
57463 };
57464
57465 -
57466 +#ifdef CONFIG_PAX_SOFTMODE
57467 +enum {
57468 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
57469 +};
57470 +#endif
57471
57472 /* CTL_VM names: */
57473 enum
57474 @@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
57475
57476 extern int proc_dostring(struct ctl_table *, int,
57477 void __user *, size_t *, loff_t *);
57478 +extern int proc_dostring_modpriv(struct ctl_table *, int,
57479 + void __user *, size_t *, loff_t *);
57480 extern int proc_dointvec(struct ctl_table *, int,
57481 void __user *, size_t *, loff_t *);
57482 extern int proc_dointvec_minmax(struct ctl_table *, int,
57483 @@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
57484
57485 extern ctl_handler sysctl_data;
57486 extern ctl_handler sysctl_string;
57487 +extern ctl_handler sysctl_string_modpriv;
57488 extern ctl_handler sysctl_intvec;
57489 extern ctl_handler sysctl_jiffies;
57490 extern ctl_handler sysctl_ms_jiffies;
57491 diff -urNp linux-2.6.32.42/include/linux/sysfs.h linux-2.6.32.42/include/linux/sysfs.h
57492 --- linux-2.6.32.42/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
57493 +++ linux-2.6.32.42/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
57494 @@ -75,8 +75,8 @@ struct bin_attribute {
57495 };
57496
57497 struct sysfs_ops {
57498 - ssize_t (*show)(struct kobject *, struct attribute *,char *);
57499 - ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
57500 + ssize_t (* const show)(struct kobject *, struct attribute *,char *);
57501 + ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
57502 };
57503
57504 struct sysfs_dirent;
57505 diff -urNp linux-2.6.32.42/include/linux/thread_info.h linux-2.6.32.42/include/linux/thread_info.h
57506 --- linux-2.6.32.42/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
57507 +++ linux-2.6.32.42/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
57508 @@ -23,7 +23,7 @@ struct restart_block {
57509 };
57510 /* For futex_wait and futex_wait_requeue_pi */
57511 struct {
57512 - u32 *uaddr;
57513 + u32 __user *uaddr;
57514 u32 val;
57515 u32 flags;
57516 u32 bitset;
57517 diff -urNp linux-2.6.32.42/include/linux/tty.h linux-2.6.32.42/include/linux/tty.h
57518 --- linux-2.6.32.42/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
57519 +++ linux-2.6.32.42/include/linux/tty.h 2011-04-17 15:56:46.000000000 -0400
57520 @@ -13,6 +13,7 @@
57521 #include <linux/tty_driver.h>
57522 #include <linux/tty_ldisc.h>
57523 #include <linux/mutex.h>
57524 +#include <linux/poll.h>
57525
57526 #include <asm/system.h>
57527
57528 @@ -443,7 +444,6 @@ extern int tty_perform_flush(struct tty_
57529 extern dev_t tty_devnum(struct tty_struct *tty);
57530 extern void proc_clear_tty(struct task_struct *p);
57531 extern struct tty_struct *get_current_tty(void);
57532 -extern void tty_default_fops(struct file_operations *fops);
57533 extern struct tty_struct *alloc_tty_struct(void);
57534 extern void free_tty_struct(struct tty_struct *tty);
57535 extern void initialize_tty_struct(struct tty_struct *tty,
57536 @@ -493,6 +493,18 @@ extern void tty_ldisc_begin(void);
57537 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
57538 extern void tty_ldisc_enable(struct tty_struct *tty);
57539
57540 +/* tty_io.c */
57541 +extern ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
57542 +extern ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
57543 +extern unsigned int tty_poll(struct file *, poll_table *);
57544 +#ifdef CONFIG_COMPAT
57545 +extern long tty_compat_ioctl(struct file *file, unsigned int cmd,
57546 + unsigned long arg);
57547 +#else
57548 +#define tty_compat_ioctl NULL
57549 +#endif
57550 +extern int tty_release(struct inode *, struct file *);
57551 +extern int tty_fasync(int fd, struct file *filp, int on);
57552
57553 /* n_tty.c */
57554 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
57555 diff -urNp linux-2.6.32.42/include/linux/tty_ldisc.h linux-2.6.32.42/include/linux/tty_ldisc.h
57556 --- linux-2.6.32.42/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
57557 +++ linux-2.6.32.42/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
57558 @@ -139,7 +139,7 @@ struct tty_ldisc_ops {
57559
57560 struct module *owner;
57561
57562 - int refcount;
57563 + atomic_t refcount;
57564 };
57565
57566 struct tty_ldisc {
57567 diff -urNp linux-2.6.32.42/include/linux/types.h linux-2.6.32.42/include/linux/types.h
57568 --- linux-2.6.32.42/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
57569 +++ linux-2.6.32.42/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
57570 @@ -191,10 +191,26 @@ typedef struct {
57571 volatile int counter;
57572 } atomic_t;
57573
57574 +#ifdef CONFIG_PAX_REFCOUNT
57575 +typedef struct {
57576 + volatile int counter;
57577 +} atomic_unchecked_t;
57578 +#else
57579 +typedef atomic_t atomic_unchecked_t;
57580 +#endif
57581 +
57582 #ifdef CONFIG_64BIT
57583 typedef struct {
57584 volatile long counter;
57585 } atomic64_t;
57586 +
57587 +#ifdef CONFIG_PAX_REFCOUNT
57588 +typedef struct {
57589 + volatile long counter;
57590 +} atomic64_unchecked_t;
57591 +#else
57592 +typedef atomic64_t atomic64_unchecked_t;
57593 +#endif
57594 #endif
57595
57596 struct ustat {
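atomic_unchecked_t gives sites that are deliberately plain counters (statistics, generation numbers and the like) a way to opt out of CONFIG_PAX_REFCOUNT overflow detection; with the option off it simply aliases atomic_t, so no other code changes. A sketch of the fallback-typedef arrangement; the function bodies are illustrative only, the real checked variant detects overflow in architecture-specific code:

/* sketch: distinct types keep checked and unchecked counters from
 * being mixed by accident; build with -DWITH_REFCOUNT_CHECKING to
 * make atomic_unchecked_t a separate type */
#include <stdio.h>

typedef struct { volatile int counter; } atomic_t;

#ifdef WITH_REFCOUNT_CHECKING
typedef struct { volatile int counter; } atomic_unchecked_t;
#else
typedef atomic_t atomic_unchecked_t;	/* feature off: plain alias */
#endif

static void atomic_inc(atomic_t *v)
{
	v->counter++;			/* real version would trap on overflow */
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	v->counter++;			/* never more than a plain increment */
}

int main(void)
{
	atomic_t refs = { 0 };
	atomic_unchecked_t stats = { 0 };

	atomic_inc(&refs);
	atomic_inc_unchecked(&stats);
	printf("%d %d\n", refs.counter, stats.counter);
	return 0;
}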
57597 diff -urNp linux-2.6.32.42/include/linux/uaccess.h linux-2.6.32.42/include/linux/uaccess.h
57598 --- linux-2.6.32.42/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
57599 +++ linux-2.6.32.42/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
57600 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
57601 long ret; \
57602 mm_segment_t old_fs = get_fs(); \
57603 \
57604 - set_fs(KERNEL_DS); \
57605 pagefault_disable(); \
57606 + set_fs(KERNEL_DS); \
57607 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
57608 - pagefault_enable(); \
57609 set_fs(old_fs); \
57610 + pagefault_enable(); \
57611 ret; \
57612 })
57613
57614 @@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
57615 * Safely read from address @src to the buffer at @dst. If a kernel fault
57616 * happens, handle that and return -EFAULT.
57617 */
57618 -extern long probe_kernel_read(void *dst, void *src, size_t size);
57619 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
57620
57621 /*
57622 * probe_kernel_write(): safely attempt to write to a location
57623 @@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
57624 * Safely write to address @dst from the buffer at @src. If a kernel fault
57625 * happens, handle that and return -EFAULT.
57626 */
57627 -extern long probe_kernel_write(void *dst, void *src, size_t size);
57628 +extern long probe_kernel_write(void *dst, const void *src, size_t size);
57629
57630 #endif /* __LINUX_UACCESS_H__ */
57631 diff -urNp linux-2.6.32.42/include/linux/unaligned/access_ok.h linux-2.6.32.42/include/linux/unaligned/access_ok.h
57632 --- linux-2.6.32.42/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
57633 +++ linux-2.6.32.42/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
57634 @@ -6,32 +6,32 @@
57635
57636 static inline u16 get_unaligned_le16(const void *p)
57637 {
57638 - return le16_to_cpup((__le16 *)p);
57639 + return le16_to_cpup((const __le16 *)p);
57640 }
57641
57642 static inline u32 get_unaligned_le32(const void *p)
57643 {
57644 - return le32_to_cpup((__le32 *)p);
57645 + return le32_to_cpup((const __le32 *)p);
57646 }
57647
57648 static inline u64 get_unaligned_le64(const void *p)
57649 {
57650 - return le64_to_cpup((__le64 *)p);
57651 + return le64_to_cpup((const __le64 *)p);
57652 }
57653
57654 static inline u16 get_unaligned_be16(const void *p)
57655 {
57656 - return be16_to_cpup((__be16 *)p);
57657 + return be16_to_cpup((const __be16 *)p);
57658 }
57659
57660 static inline u32 get_unaligned_be32(const void *p)
57661 {
57662 - return be32_to_cpup((__be32 *)p);
57663 + return be32_to_cpup((const __be32 *)p);
57664 }
57665
57666 static inline u64 get_unaligned_be64(const void *p)
57667 {
57668 - return be64_to_cpup((__be64 *)p);
57669 + return be64_to_cpup((const __be64 *)p);
57670 }
57671
57672 static inline void put_unaligned_le16(u16 val, void *p)
57673 diff -urNp linux-2.6.32.42/include/linux/vmalloc.h linux-2.6.32.42/include/linux/vmalloc.h
57674 --- linux-2.6.32.42/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
57675 +++ linux-2.6.32.42/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
57676 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
57677 #define VM_MAP 0x00000004 /* vmap()ed pages */
57678 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
57679 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
57680 +
57681 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
57682 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
57683 +#endif
57684 +
57685 /* bits [20..32] reserved for arch specific ioremap internals */
57686
57687 /*
57688 @@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
57689
57690 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
57691
57692 +#define vmalloc(x) \
57693 +({ \
57694 + void *___retval; \
57695 + intoverflow_t ___x = (intoverflow_t)x; \
57696 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
57697 + ___retval = NULL; \
57698 + else \
57699 + ___retval = vmalloc((unsigned long)___x); \
57700 + ___retval; \
57701 +})
57702 +
57703 +#define __vmalloc(x, y, z) \
57704 +({ \
57705 + void *___retval; \
57706 + intoverflow_t ___x = (intoverflow_t)x; \
57707 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
57708 + ___retval = NULL; \
57709 + else \
57710 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
57711 + ___retval; \
57712 +})
57713 +
57714 +#define vmalloc_user(x) \
57715 +({ \
57716 + void *___retval; \
57717 + intoverflow_t ___x = (intoverflow_t)x; \
57718 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
57719 + ___retval = NULL; \
57720 + else \
57721 + ___retval = vmalloc_user((unsigned long)___x); \
57722 + ___retval; \
57723 +})
57724 +
57725 +#define vmalloc_exec(x) \
57726 +({ \
57727 + void *___retval; \
57728 + intoverflow_t ___x = (intoverflow_t)x; \
57729 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
57730 + ___retval = NULL; \
57731 + else \
57732 + ___retval = vmalloc_exec((unsigned long)___x); \
57733 + ___retval; \
57734 +})
57735 +
57736 +#define vmalloc_node(x, y) \
57737 +({ \
57738 + void *___retval; \
57739 + intoverflow_t ___x = (intoverflow_t)x; \
57740 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
57741 + ___retval = NULL; \
57742 + else \
57743 + ___retval = vmalloc_node((unsigned long)___x, (y));\
57744 + ___retval; \
57745 +})
57746 +
57747 +#define vmalloc_32(x) \
57748 +({ \
57749 + void *___retval; \
57750 + intoverflow_t ___x = (intoverflow_t)x; \
57751 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
57752 + ___retval = NULL; \
57753 + else \
57754 + ___retval = vmalloc_32((unsigned long)___x); \
57755 + ___retval; \
57756 +})
57757 +
57758 +#define vmalloc_32_user(x) \
57759 +({ \
57760 + void *___retval; \
57761 + intoverflow_t ___x = (intoverflow_t)x; \
57762 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
57763 + ___retval = NULL; \
57764 + else \
57765 + ___retval = vmalloc_32_user((unsigned long)___x);\
57766 + ___retval; \
57767 +})
57768 +
57769 #endif /* _LINUX_VMALLOC_H */
57770 diff -urNp linux-2.6.32.42/include/linux/vmstat.h linux-2.6.32.42/include/linux/vmstat.h
57771 --- linux-2.6.32.42/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
57772 +++ linux-2.6.32.42/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
57773 @@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
57774 /*
57775 * Zone based page accounting with per cpu differentials.
57776 */
57777 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57778 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57779
57780 static inline void zone_page_state_add(long x, struct zone *zone,
57781 enum zone_stat_item item)
57782 {
57783 - atomic_long_add(x, &zone->vm_stat[item]);
57784 - atomic_long_add(x, &vm_stat[item]);
57785 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
57786 + atomic_long_add_unchecked(x, &vm_stat[item]);
57787 }
57788
57789 static inline unsigned long global_page_state(enum zone_stat_item item)
57790 {
57791 - long x = atomic_long_read(&vm_stat[item]);
57792 + long x = atomic_long_read_unchecked(&vm_stat[item]);
57793 #ifdef CONFIG_SMP
57794 if (x < 0)
57795 x = 0;
57796 @@ -158,7 +158,7 @@ static inline unsigned long global_page_
57797 static inline unsigned long zone_page_state(struct zone *zone,
57798 enum zone_stat_item item)
57799 {
57800 - long x = atomic_long_read(&zone->vm_stat[item]);
57801 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57802 #ifdef CONFIG_SMP
57803 if (x < 0)
57804 x = 0;
57805 @@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
57806 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
57807 enum zone_stat_item item)
57808 {
57809 - long x = atomic_long_read(&zone->vm_stat[item]);
57810 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
57811
57812 #ifdef CONFIG_SMP
57813 int cpu;
57814 @@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
57815
57816 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
57817 {
57818 - atomic_long_inc(&zone->vm_stat[item]);
57819 - atomic_long_inc(&vm_stat[item]);
57820 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
57821 + atomic_long_inc_unchecked(&vm_stat[item]);
57822 }
57823
57824 static inline void __inc_zone_page_state(struct page *page,
57825 @@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
57826
57827 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
57828 {
57829 - atomic_long_dec(&zone->vm_stat[item]);
57830 - atomic_long_dec(&vm_stat[item]);
57831 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
57832 + atomic_long_dec_unchecked(&vm_stat[item]);
57833 }
57834
57835 static inline void __dec_zone_page_state(struct page *page,
57836 diff -urNp linux-2.6.32.42/include/media/v4l2-device.h linux-2.6.32.42/include/media/v4l2-device.h
57837 --- linux-2.6.32.42/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
57838 +++ linux-2.6.32.42/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
57839 @@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
57840 this function returns 0. If the name ends with a digit (e.g. cx18),
57841 then the name will be set to cx18-0 since cx180 looks really odd. */
57842 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
57843 - atomic_t *instance);
57844 + atomic_unchecked_t *instance);
57845
57846 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
57847 Since the parent disappears this ensures that v4l2_dev doesn't have an
57848 diff -urNp linux-2.6.32.42/include/net/flow.h linux-2.6.32.42/include/net/flow.h
57849 --- linux-2.6.32.42/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
57850 +++ linux-2.6.32.42/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
57851 @@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
57852 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
57853 u8 dir, flow_resolve_t resolver);
57854 extern void flow_cache_flush(void);
57855 -extern atomic_t flow_cache_genid;
57856 +extern atomic_unchecked_t flow_cache_genid;
57857
57858 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
57859 {
57860 diff -urNp linux-2.6.32.42/include/net/inetpeer.h linux-2.6.32.42/include/net/inetpeer.h
57861 --- linux-2.6.32.42/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
57862 +++ linux-2.6.32.42/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
57863 @@ -24,7 +24,7 @@ struct inet_peer
57864 __u32 dtime; /* the time of last use of not
57865 * referenced entries */
57866 atomic_t refcnt;
57867 - atomic_t rid; /* Frag reception counter */
57868 + atomic_unchecked_t rid; /* Frag reception counter */
57869 __u32 tcp_ts;
57870 unsigned long tcp_ts_stamp;
57871 };
57872 diff -urNp linux-2.6.32.42/include/net/ip_vs.h linux-2.6.32.42/include/net/ip_vs.h
57873 --- linux-2.6.32.42/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
57874 +++ linux-2.6.32.42/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
57875 @@ -365,7 +365,7 @@ struct ip_vs_conn {
57876 struct ip_vs_conn *control; /* Master control connection */
57877 atomic_t n_control; /* Number of controlled ones */
57878 struct ip_vs_dest *dest; /* real server */
57879 - atomic_t in_pkts; /* incoming packet counter */
57880 + atomic_unchecked_t in_pkts; /* incoming packet counter */
57881
57882 /* packet transmitter for different forwarding methods. If it
57883 mangles the packet, it must return NF_DROP or better NF_STOLEN,
57884 @@ -466,7 +466,7 @@ struct ip_vs_dest {
57885 union nf_inet_addr addr; /* IP address of the server */
57886 __be16 port; /* port number of the server */
57887 volatile unsigned flags; /* dest status flags */
57888 - atomic_t conn_flags; /* flags to copy to conn */
57889 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
57890 atomic_t weight; /* server weight */
57891
57892 atomic_t refcnt; /* reference counter */
57893 diff -urNp linux-2.6.32.42/include/net/irda/ircomm_tty.h linux-2.6.32.42/include/net/irda/ircomm_tty.h
57894 --- linux-2.6.32.42/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
57895 +++ linux-2.6.32.42/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
57896 @@ -35,6 +35,7 @@
57897 #include <linux/termios.h>
57898 #include <linux/timer.h>
57899 #include <linux/tty.h> /* struct tty_struct */
57900 +#include <asm/local.h>
57901
57902 #include <net/irda/irias_object.h>
57903 #include <net/irda/ircomm_core.h>
57904 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
57905 unsigned short close_delay;
57906 unsigned short closing_wait; /* time to wait before closing */
57907
57908 - int open_count;
57909 - int blocked_open; /* # of blocked opens */
57910 + local_t open_count;
57911 + local_t blocked_open; /* # of blocked opens */
57912
57913 /* Protect concurent access to :
57914 * o self->open_count
57915 diff -urNp linux-2.6.32.42/include/net/iucv/af_iucv.h linux-2.6.32.42/include/net/iucv/af_iucv.h
57916 --- linux-2.6.32.42/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
57917 +++ linux-2.6.32.42/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
57918 @@ -87,7 +87,7 @@ struct iucv_sock {
57919 struct iucv_sock_list {
57920 struct hlist_head head;
57921 rwlock_t lock;
57922 - atomic_t autobind_name;
57923 + atomic_unchecked_t autobind_name;
57924 };
57925
57926 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
57927 diff -urNp linux-2.6.32.42/include/net/neighbour.h linux-2.6.32.42/include/net/neighbour.h
57928 --- linux-2.6.32.42/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
57929 +++ linux-2.6.32.42/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
57930 @@ -125,12 +125,12 @@ struct neighbour
57931 struct neigh_ops
57932 {
57933 int family;
57934 - void (*solicit)(struct neighbour *, struct sk_buff*);
57935 - void (*error_report)(struct neighbour *, struct sk_buff*);
57936 - int (*output)(struct sk_buff*);
57937 - int (*connected_output)(struct sk_buff*);
57938 - int (*hh_output)(struct sk_buff*);
57939 - int (*queue_xmit)(struct sk_buff*);
57940 + void (* const solicit)(struct neighbour *, struct sk_buff*);
57941 + void (* const error_report)(struct neighbour *, struct sk_buff*);
57942 + int (* const output)(struct sk_buff*);
57943 + int (* const connected_output)(struct sk_buff*);
57944 + int (* const hh_output)(struct sk_buff*);
57945 + int (* const queue_xmit)(struct sk_buff*);
57946 };
57947
57948 struct pneigh_entry
57949 diff -urNp linux-2.6.32.42/include/net/netlink.h linux-2.6.32.42/include/net/netlink.h
57950 --- linux-2.6.32.42/include/net/netlink.h 2011-03-27 14:31:47.000000000 -0400
57951 +++ linux-2.6.32.42/include/net/netlink.h 2011-04-17 15:56:46.000000000 -0400
57952 @@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
57953 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
57954 {
57955 if (mark)
57956 - skb_trim(skb, (unsigned char *) mark - skb->data);
57957 + skb_trim(skb, (const unsigned char *) mark - skb->data);
57958 }
57959
57960 /**
57961 diff -urNp linux-2.6.32.42/include/net/netns/ipv4.h linux-2.6.32.42/include/net/netns/ipv4.h
57962 --- linux-2.6.32.42/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
57963 +++ linux-2.6.32.42/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
57964 @@ -54,7 +54,7 @@ struct netns_ipv4 {
57965 int current_rt_cache_rebuild_count;
57966
57967 struct timer_list rt_secret_timer;
57968 - atomic_t rt_genid;
57969 + atomic_unchecked_t rt_genid;
57970
57971 #ifdef CONFIG_IP_MROUTE
57972 struct sock *mroute_sk;
57973 diff -urNp linux-2.6.32.42/include/net/sctp/sctp.h linux-2.6.32.42/include/net/sctp/sctp.h
57974 --- linux-2.6.32.42/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
57975 +++ linux-2.6.32.42/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
57976 @@ -305,8 +305,8 @@ extern int sctp_debug_flag;
57977
57978 #else /* SCTP_DEBUG */
57979
57980 -#define SCTP_DEBUG_PRINTK(whatever...)
57981 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
57982 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
57983 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
57984 #define SCTP_ENABLE_DEBUG
57985 #define SCTP_DISABLE_DEBUG
57986 #define SCTP_ASSERT(expr, str, func)
57987 diff -urNp linux-2.6.32.42/include/net/sock.h linux-2.6.32.42/include/net/sock.h
57988 --- linux-2.6.32.42/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
57989 +++ linux-2.6.32.42/include/net/sock.h 2011-05-04 17:56:28.000000000 -0400
57990 @@ -272,7 +272,7 @@ struct sock {
57991 rwlock_t sk_callback_lock;
57992 int sk_err,
57993 sk_err_soft;
57994 - atomic_t sk_drops;
57995 + atomic_unchecked_t sk_drops;
57996 unsigned short sk_ack_backlog;
57997 unsigned short sk_max_ack_backlog;
57998 __u32 sk_priority;
57999 diff -urNp linux-2.6.32.42/include/net/tcp.h linux-2.6.32.42/include/net/tcp.h
58000 --- linux-2.6.32.42/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
58001 +++ linux-2.6.32.42/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
58002 @@ -1444,6 +1444,7 @@ enum tcp_seq_states {
58003 struct tcp_seq_afinfo {
58004 char *name;
58005 sa_family_t family;
58006 + /* cannot be const */
58007 struct file_operations seq_fops;
58008 struct seq_operations seq_ops;
58009 };
58010 diff -urNp linux-2.6.32.42/include/net/udp.h linux-2.6.32.42/include/net/udp.h
58011 --- linux-2.6.32.42/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
58012 +++ linux-2.6.32.42/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
58013 @@ -187,6 +187,7 @@ struct udp_seq_afinfo {
58014 char *name;
58015 sa_family_t family;
58016 struct udp_table *udp_table;
58017 + /* cannot be const */
58018 struct file_operations seq_fops;
58019 struct seq_operations seq_ops;
58020 };
58021 diff -urNp linux-2.6.32.42/include/scsi/scsi_device.h linux-2.6.32.42/include/scsi/scsi_device.h
58022 --- linux-2.6.32.42/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
58023 +++ linux-2.6.32.42/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
58024 @@ -156,9 +156,9 @@ struct scsi_device {
58025 unsigned int max_device_blocked; /* what device_blocked counts down from */
58026 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
58027
58028 - atomic_t iorequest_cnt;
58029 - atomic_t iodone_cnt;
58030 - atomic_t ioerr_cnt;
58031 + atomic_unchecked_t iorequest_cnt;
58032 + atomic_unchecked_t iodone_cnt;
58033 + atomic_unchecked_t ioerr_cnt;
58034
58035 struct device sdev_gendev,
58036 sdev_dev;
58037 diff -urNp linux-2.6.32.42/include/sound/ac97_codec.h linux-2.6.32.42/include/sound/ac97_codec.h
58038 --- linux-2.6.32.42/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
58039 +++ linux-2.6.32.42/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
58040 @@ -419,15 +419,15 @@
58041 struct snd_ac97;
58042
58043 struct snd_ac97_build_ops {
58044 - int (*build_3d) (struct snd_ac97 *ac97);
58045 - int (*build_specific) (struct snd_ac97 *ac97);
58046 - int (*build_spdif) (struct snd_ac97 *ac97);
58047 - int (*build_post_spdif) (struct snd_ac97 *ac97);
58048 + int (* const build_3d) (struct snd_ac97 *ac97);
58049 + int (* const build_specific) (struct snd_ac97 *ac97);
58050 + int (* const build_spdif) (struct snd_ac97 *ac97);
58051 + int (* const build_post_spdif) (struct snd_ac97 *ac97);
58052 #ifdef CONFIG_PM
58053 - void (*suspend) (struct snd_ac97 *ac97);
58054 - void (*resume) (struct snd_ac97 *ac97);
58055 + void (* const suspend) (struct snd_ac97 *ac97);
58056 + void (* const resume) (struct snd_ac97 *ac97);
58057 #endif
58058 - void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
58059 + void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
58060 };
58061
58062 struct snd_ac97_bus_ops {
58063 @@ -477,7 +477,7 @@ struct snd_ac97_template {
58064
58065 struct snd_ac97 {
58066 /* -- lowlevel (hardware) driver specific -- */
58067 - struct snd_ac97_build_ops * build_ops;
58068 + const struct snd_ac97_build_ops * build_ops;
58069 void *private_data;
58070 void (*private_free) (struct snd_ac97 *ac97);
58071 /* --- */
58072 diff -urNp linux-2.6.32.42/include/sound/ymfpci.h linux-2.6.32.42/include/sound/ymfpci.h
58073 --- linux-2.6.32.42/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
58074 +++ linux-2.6.32.42/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
58075 @@ -358,7 +358,7 @@ struct snd_ymfpci {
58076 spinlock_t reg_lock;
58077 spinlock_t voice_lock;
58078 wait_queue_head_t interrupt_sleep;
58079 - atomic_t interrupt_sleep_count;
58080 + atomic_unchecked_t interrupt_sleep_count;
58081 struct snd_info_entry *proc_entry;
58082 const struct firmware *dsp_microcode;
58083 const struct firmware *controller_microcode;
58084 diff -urNp linux-2.6.32.42/include/trace/events/irq.h linux-2.6.32.42/include/trace/events/irq.h
58085 --- linux-2.6.32.42/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
58086 +++ linux-2.6.32.42/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
58087 @@ -34,7 +34,7 @@
58088 */
58089 TRACE_EVENT(irq_handler_entry,
58090
58091 - TP_PROTO(int irq, struct irqaction *action),
58092 + TP_PROTO(int irq, const struct irqaction *action),
58093
58094 TP_ARGS(irq, action),
58095
58096 @@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
58097 */
58098 TRACE_EVENT(irq_handler_exit,
58099
58100 - TP_PROTO(int irq, struct irqaction *action, int ret),
58101 + TP_PROTO(int irq, const struct irqaction *action, int ret),
58102
58103 TP_ARGS(irq, action, ret),
58104
58105 @@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
58106 */
58107 TRACE_EVENT(softirq_entry,
58108
58109 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
58110 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
58111
58112 TP_ARGS(h, vec),
58113
58114 @@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
58115 */
58116 TRACE_EVENT(softirq_exit,
58117
58118 - TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
58119 + TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
58120
58121 TP_ARGS(h, vec),
58122
58123 diff -urNp linux-2.6.32.42/include/video/uvesafb.h linux-2.6.32.42/include/video/uvesafb.h
58124 --- linux-2.6.32.42/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
58125 +++ linux-2.6.32.42/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
58126 @@ -177,6 +177,7 @@ struct uvesafb_par {
58127 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
58128 u8 pmi_setpal; /* PMI for palette changes */
58129 u16 *pmi_base; /* protected mode interface location */
58130 + u8 *pmi_code; /* protected mode code location */
58131 void *pmi_start;
58132 void *pmi_pal;
58133 u8 *vbe_state_orig; /*
58134 diff -urNp linux-2.6.32.42/init/do_mounts.c linux-2.6.32.42/init/do_mounts.c
58135 --- linux-2.6.32.42/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
58136 +++ linux-2.6.32.42/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
58137 @@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
58138
58139 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
58140 {
58141 - int err = sys_mount(name, "/root", fs, flags, data);
58142 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
58143 if (err)
58144 return err;
58145
58146 - sys_chdir("/root");
58147 + sys_chdir((__force const char __user *)"/root");
58148 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
58149 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
58150 current->fs->pwd.mnt->mnt_sb->s_type->name,
58151 @@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
58152 va_start(args, fmt);
58153 vsprintf(buf, fmt, args);
58154 va_end(args);
58155 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
58156 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
58157 if (fd >= 0) {
58158 sys_ioctl(fd, FDEJECT, 0);
58159 sys_close(fd);
58160 }
58161 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
58162 - fd = sys_open("/dev/console", O_RDWR, 0);
58163 + fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
58164 if (fd >= 0) {
58165 sys_ioctl(fd, TCGETS, (long)&termios);
58166 termios.c_lflag &= ~ICANON;
58167 sys_ioctl(fd, TCSETSF, (long)&termios);
58168 - sys_read(fd, &c, 1);
58169 + sys_read(fd, (char __user *)&c, 1);
58170 termios.c_lflag |= ICANON;
58171 sys_ioctl(fd, TCSETSF, (long)&termios);
58172 sys_close(fd);
58173 @@ -416,6 +416,6 @@ void __init prepare_namespace(void)
58174 mount_root();
58175 out:
58176 devtmpfs_mount("dev");
58177 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58178 - sys_chroot(".");
58179 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58180 + sys_chroot((__force char __user *)".");
58181 }
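The casts added throughout init/do_mounts.c affect only static analysis: sys_mount(), sys_open() and the other syscall wrappers declare their pointer arguments __user, so handing them kernel strings makes sparse report an address-space mismatch, and (__force ... __user) acknowledges the conversion at that one call site. A sketch of the annotations involved; outside a sparse run they expand to nothing, so this also builds with a plain compiler:

/* sketch: sparse address-space annotations, in the style of the
 * kernel's compiler.h; __CHECKER__ is only defined by sparse */
#include <stdio.h>

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* stand-in for the in-kernel wrapper, which expects a __user pointer */
static long sys_unlink(const char __user *pathname)
{
	(void)pathname;
	return 0;
}

int main(void)
{
	const char *kernel_name = "/initrd.image";

	/* without the cast sparse warns "incorrect type in argument 1
	 * (different address spaces)"; __force marks it as intentional */
	return (int)sys_unlink((__force const char __user *)kernel_name);
}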
58182 diff -urNp linux-2.6.32.42/init/do_mounts.h linux-2.6.32.42/init/do_mounts.h
58183 --- linux-2.6.32.42/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
58184 +++ linux-2.6.32.42/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
58185 @@ -15,15 +15,15 @@ extern int root_mountflags;
58186
58187 static inline int create_dev(char *name, dev_t dev)
58188 {
58189 - sys_unlink(name);
58190 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
58191 + sys_unlink((__force char __user *)name);
58192 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
58193 }
58194
58195 #if BITS_PER_LONG == 32
58196 static inline u32 bstat(char *name)
58197 {
58198 struct stat64 stat;
58199 - if (sys_stat64(name, &stat) != 0)
58200 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
58201 return 0;
58202 if (!S_ISBLK(stat.st_mode))
58203 return 0;
58204 diff -urNp linux-2.6.32.42/init/do_mounts_initrd.c linux-2.6.32.42/init/do_mounts_initrd.c
58205 --- linux-2.6.32.42/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
58206 +++ linux-2.6.32.42/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
58207 @@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
58208 sys_close(old_fd);sys_close(root_fd);
58209 sys_close(0);sys_close(1);sys_close(2);
58210 sys_setsid();
58211 - (void) sys_open("/dev/console",O_RDWR,0);
58212 + (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
58213 (void) sys_dup(0);
58214 (void) sys_dup(0);
58215 return kernel_execve(shell, argv, envp_init);
58216 @@ -47,13 +47,13 @@ static void __init handle_initrd(void)
58217 create_dev("/dev/root.old", Root_RAM0);
58218 /* mount initrd on rootfs' /root */
58219 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
58220 - sys_mkdir("/old", 0700);
58221 - root_fd = sys_open("/", 0, 0);
58222 - old_fd = sys_open("/old", 0, 0);
58223 + sys_mkdir((__force const char __user *)"/old", 0700);
58224 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
58225 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
58226 /* move initrd over / and chdir/chroot in initrd root */
58227 - sys_chdir("/root");
58228 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
58229 - sys_chroot(".");
58230 + sys_chdir((__force const char __user *)"/root");
58231 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58232 + sys_chroot((__force const char __user *)".");
58233
58234 /*
58235 * In case that a resume from disk is carried out by linuxrc or one of
58236 @@ -70,15 +70,15 @@ static void __init handle_initrd(void)
58237
58238 /* move initrd to rootfs' /old */
58239 sys_fchdir(old_fd);
58240 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
58241 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
58242 /* switch root and cwd back to / of rootfs */
58243 sys_fchdir(root_fd);
58244 - sys_chroot(".");
58245 + sys_chroot((__force const char __user *)".");
58246 sys_close(old_fd);
58247 sys_close(root_fd);
58248
58249 if (new_decode_dev(real_root_dev) == Root_RAM0) {
58250 - sys_chdir("/old");
58251 + sys_chdir((__force const char __user *)"/old");
58252 return;
58253 }
58254
58255 @@ -86,17 +86,17 @@ static void __init handle_initrd(void)
58256 mount_root();
58257
58258 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
58259 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
58260 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
58261 if (!error)
58262 printk("okay\n");
58263 else {
58264 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
58265 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
58266 if (error == -ENOENT)
58267 printk("/initrd does not exist. Ignored.\n");
58268 else
58269 printk("failed\n");
58270 printk(KERN_NOTICE "Unmounting old root\n");
58271 - sys_umount("/old", MNT_DETACH);
58272 + sys_umount((__force char __user *)"/old", MNT_DETACH);
58273 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
58274 if (fd < 0) {
58275 error = fd;
58276 @@ -119,11 +119,11 @@ int __init initrd_load(void)
58277 * mounted in the normal path.
58278 */
58279 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
58280 - sys_unlink("/initrd.image");
58281 + sys_unlink((__force const char __user *)"/initrd.image");
58282 handle_initrd();
58283 return 1;
58284 }
58285 }
58286 - sys_unlink("/initrd.image");
58287 + sys_unlink((__force const char __user *)"/initrd.image");
58288 return 0;
58289 }
58290 diff -urNp linux-2.6.32.42/init/do_mounts_md.c linux-2.6.32.42/init/do_mounts_md.c
58291 --- linux-2.6.32.42/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
58292 +++ linux-2.6.32.42/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
58293 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
58294 partitioned ? "_d" : "", minor,
58295 md_setup_args[ent].device_names);
58296
58297 - fd = sys_open(name, 0, 0);
58298 + fd = sys_open((__force char __user *)name, 0, 0);
58299 if (fd < 0) {
58300 printk(KERN_ERR "md: open failed - cannot start "
58301 "array %s\n", name);
58302 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
58303 * array without it
58304 */
58305 sys_close(fd);
58306 - fd = sys_open(name, 0, 0);
58307 + fd = sys_open((__force char __user *)name, 0, 0);
58308 sys_ioctl(fd, BLKRRPART, 0);
58309 }
58310 sys_close(fd);
58311 @@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
58312
58313 wait_for_device_probe();
58314
58315 - fd = sys_open("/dev/md0", 0, 0);
58316 + fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
58317 if (fd >= 0) {
58318 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
58319 sys_close(fd);
58320 diff -urNp linux-2.6.32.42/init/initramfs.c linux-2.6.32.42/init/initramfs.c
58321 --- linux-2.6.32.42/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
58322 +++ linux-2.6.32.42/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
58323 @@ -74,7 +74,7 @@ static void __init free_hash(void)
58324 }
58325 }
58326
58327 -static long __init do_utime(char __user *filename, time_t mtime)
58328 +static long __init do_utime(__force char __user *filename, time_t mtime)
58329 {
58330 struct timespec t[2];
58331
58332 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
58333 struct dir_entry *de, *tmp;
58334 list_for_each_entry_safe(de, tmp, &dir_list, list) {
58335 list_del(&de->list);
58336 - do_utime(de->name, de->mtime);
58337 + do_utime((__force char __user *)de->name, de->mtime);
58338 kfree(de->name);
58339 kfree(de);
58340 }
58341 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
58342 if (nlink >= 2) {
58343 char *old = find_link(major, minor, ino, mode, collected);
58344 if (old)
58345 - return (sys_link(old, collected) < 0) ? -1 : 1;
58346 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
58347 }
58348 return 0;
58349 }
58350 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
58351 {
58352 struct stat st;
58353
58354 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
58355 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
58356 if (S_ISDIR(st.st_mode))
58357 - sys_rmdir(path);
58358 + sys_rmdir((__force char __user *)path);
58359 else
58360 - sys_unlink(path);
58361 + sys_unlink((__force char __user *)path);
58362 }
58363 }
58364
58365 @@ -305,7 +305,7 @@ static int __init do_name(void)
58366 int openflags = O_WRONLY|O_CREAT;
58367 if (ml != 1)
58368 openflags |= O_TRUNC;
58369 - wfd = sys_open(collected, openflags, mode);
58370 + wfd = sys_open((__force char __user *)collected, openflags, mode);
58371
58372 if (wfd >= 0) {
58373 sys_fchown(wfd, uid, gid);
58374 @@ -317,17 +317,17 @@ static int __init do_name(void)
58375 }
58376 }
58377 } else if (S_ISDIR(mode)) {
58378 - sys_mkdir(collected, mode);
58379 - sys_chown(collected, uid, gid);
58380 - sys_chmod(collected, mode);
58381 + sys_mkdir((__force char __user *)collected, mode);
58382 + sys_chown((__force char __user *)collected, uid, gid);
58383 + sys_chmod((__force char __user *)collected, mode);
58384 dir_add(collected, mtime);
58385 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
58386 S_ISFIFO(mode) || S_ISSOCK(mode)) {
58387 if (maybe_link() == 0) {
58388 - sys_mknod(collected, mode, rdev);
58389 - sys_chown(collected, uid, gid);
58390 - sys_chmod(collected, mode);
58391 - do_utime(collected, mtime);
58392 + sys_mknod((__force char __user *)collected, mode, rdev);
58393 + sys_chown((__force char __user *)collected, uid, gid);
58394 + sys_chmod((__force char __user *)collected, mode);
58395 + do_utime((__force char __user *)collected, mtime);
58396 }
58397 }
58398 return 0;
58399 @@ -336,15 +336,15 @@ static int __init do_name(void)
58400 static int __init do_copy(void)
58401 {
58402 if (count >= body_len) {
58403 - sys_write(wfd, victim, body_len);
58404 + sys_write(wfd, (__force char __user *)victim, body_len);
58405 sys_close(wfd);
58406 - do_utime(vcollected, mtime);
58407 + do_utime((__force char __user *)vcollected, mtime);
58408 kfree(vcollected);
58409 eat(body_len);
58410 state = SkipIt;
58411 return 0;
58412 } else {
58413 - sys_write(wfd, victim, count);
58414 + sys_write(wfd, (__force char __user *)victim, count);
58415 body_len -= count;
58416 eat(count);
58417 return 1;
58418 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
58419 {
58420 collected[N_ALIGN(name_len) + body_len] = '\0';
58421 clean_path(collected, 0);
58422 - sys_symlink(collected + N_ALIGN(name_len), collected);
58423 - sys_lchown(collected, uid, gid);
58424 - do_utime(collected, mtime);
58425 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
58426 + sys_lchown((__force char __user *)collected, uid, gid);
58427 + do_utime((__force char __user *)collected, mtime);
58428 state = SkipIt;
58429 next_state = Reset;
58430 return 0;
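The __force casts added across init/do_mounts*.c and init/initramfs.c are for sparse, not for the generated code: these early-boot helpers hand kernel strings to syscall entry points such as sys_open() and sys_unlink(), which is fine while init runs with a kernel address limit, but sparse's address-space checker would otherwise warn about passing a plain pointer where a __user one is expected. A rough standalone sketch of the annotation mechanics (the fallback #defines mirror what the kernel's compiler.h does under sparse; fake_sys_unlink is a made-up stand-in, not a kernel interface):

#include <stdio.h>

#ifdef __CHECKER__                      /* defined when sparse analyses the file */
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* stand-in for a syscall entry point that expects a userspace pointer */
static long fake_sys_unlink(const char __user *pathname)
{
        printf("unlink(%s)\n", (__force const char *)pathname);
        return 0;
}

int main(void)
{
        /* without __force sparse reports "incorrect type in argument 1
           (different address spaces)"; with it the cast is accepted */
        return (int)fake_sys_unlink((__force const char __user *)"/initrd.image");
}

Built normally the attributes expand to nothing; run the same file through sparse and the forced cast is what keeps the address-space mismatch from being reported.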
58431 diff -urNp linux-2.6.32.42/init/Kconfig linux-2.6.32.42/init/Kconfig
58432 --- linux-2.6.32.42/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
58433 +++ linux-2.6.32.42/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
58434 @@ -1004,7 +1004,7 @@ config SLUB_DEBUG
58435
58436 config COMPAT_BRK
58437 bool "Disable heap randomization"
58438 - default y
58439 + default n
58440 help
58441 Randomizing heap placement makes heap exploits harder, but it
58442 also breaks ancient binaries (including anything libc5 based).
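Flipping COMPAT_BRK to default n means kernels built from this patch randomize the heap base unless the option is explicitly re-enabled for libc5-era binaries. A quick way to observe the difference from userspace, assuming a Linux host with kernel.randomize_va_space left at its default of 2, is to print the initial program break across several runs:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* run a few times: with COMPAT_BRK=n (and randomize_va_space=2) the
           initial break moves between runs; with COMPAT_BRK=y it stays put */
        printf("initial program break: %p\n", sbrk(0));
        return 0;
}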
58443 diff -urNp linux-2.6.32.42/init/main.c linux-2.6.32.42/init/main.c
58444 --- linux-2.6.32.42/init/main.c 2011-05-10 22:12:01.000000000 -0400
58445 +++ linux-2.6.32.42/init/main.c 2011-05-22 23:02:06.000000000 -0400
58446 @@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
58447 #ifdef CONFIG_TC
58448 extern void tc_init(void);
58449 #endif
58450 +extern void grsecurity_init(void);
58451
58452 enum system_states system_state __read_mostly;
58453 EXPORT_SYMBOL(system_state);
58454 @@ -183,6 +184,49 @@ static int __init set_reset_devices(char
58455
58456 __setup("reset_devices", set_reset_devices);
58457
58458 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
58459 +extern char pax_enter_kernel_user[];
58460 +extern char pax_exit_kernel_user[];
58461 +extern pgdval_t clone_pgd_mask;
58462 +#endif
58463 +
58464 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
58465 +static int __init setup_pax_nouderef(char *str)
58466 +{
58467 +#ifdef CONFIG_X86_32
58468 + unsigned int cpu;
58469 + struct desc_struct *gdt;
58470 +
58471 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
58472 + gdt = get_cpu_gdt_table(cpu);
58473 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
58474 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
58475 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
58476 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
58477 + }
58478 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
58479 +#else
58480 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
58481 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
58482 + clone_pgd_mask = ~(pgdval_t)0UL;
58483 +#endif
58484 +
58485 + return 0;
58486 +}
58487 +early_param("pax_nouderef", setup_pax_nouderef);
58488 +#endif
58489 +
58490 +#ifdef CONFIG_PAX_SOFTMODE
58491 +unsigned int pax_softmode;
58492 +
58493 +static int __init setup_pax_softmode(char *str)
58494 +{
58495 + get_option(&str, &pax_softmode);
58496 + return 1;
58497 +}
58498 +__setup("pax_softmode=", setup_pax_softmode);
58499 +#endif
58500 +
58501 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
58502 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
58503 static const char *panic_later, *panic_param;
58504 @@ -705,52 +749,53 @@ int initcall_debug;
58505 core_param(initcall_debug, initcall_debug, bool, 0644);
58506
58507 static char msgbuf[64];
58508 -static struct boot_trace_call call;
58509 -static struct boot_trace_ret ret;
58510 +static struct boot_trace_call trace_call;
58511 +static struct boot_trace_ret trace_ret;
58512
58513 int do_one_initcall(initcall_t fn)
58514 {
58515 int count = preempt_count();
58516 ktime_t calltime, delta, rettime;
58517 + const char *msg1 = "", *msg2 = "";
58518
58519 if (initcall_debug) {
58520 - call.caller = task_pid_nr(current);
58521 - printk("calling %pF @ %i\n", fn, call.caller);
58522 + trace_call.caller = task_pid_nr(current);
58523 + printk("calling %pF @ %i\n", fn, trace_call.caller);
58524 calltime = ktime_get();
58525 - trace_boot_call(&call, fn);
58526 + trace_boot_call(&trace_call, fn);
58527 enable_boot_trace();
58528 }
58529
58530 - ret.result = fn();
58531 + trace_ret.result = fn();
58532
58533 if (initcall_debug) {
58534 disable_boot_trace();
58535 rettime = ktime_get();
58536 delta = ktime_sub(rettime, calltime);
58537 - ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
58538 - trace_boot_ret(&ret, fn);
58539 + trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
58540 + trace_boot_ret(&trace_ret, fn);
58541 printk("initcall %pF returned %d after %Ld usecs\n", fn,
58542 - ret.result, ret.duration);
58543 + trace_ret.result, trace_ret.duration);
58544 }
58545
58546 msgbuf[0] = 0;
58547
58548 - if (ret.result && ret.result != -ENODEV && initcall_debug)
58549 - sprintf(msgbuf, "error code %d ", ret.result);
58550 + if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
58551 + sprintf(msgbuf, "error code %d ", trace_ret.result);
58552
58553 if (preempt_count() != count) {
58554 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
58555 + msg1 = " preemption imbalance";
58556 preempt_count() = count;
58557 }
58558 if (irqs_disabled()) {
58559 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
58560 + msg2 = " disabled interrupts";
58561 local_irq_enable();
58562 }
58563 - if (msgbuf[0]) {
58564 - printk("initcall %pF returned with %s\n", fn, msgbuf);
58565 + if (msgbuf[0] || *msg1 || *msg2) {
58566 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
58567 }
58568
58569 - return ret.result;
58570 + return trace_ret.result;
58571 }
58572
58573
58574 @@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
58575 if (!ramdisk_execute_command)
58576 ramdisk_execute_command = "/init";
58577
58578 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
58579 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
58580 ramdisk_execute_command = NULL;
58581 prepare_namespace();
58582 }
58583
58584 + grsecurity_init();
58585 +
58586 /*
58587 * Ok, we have completed the initial bootup, and
58588 * we're essentially up and running. Get rid of the
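Besides wiring in grsecurity_init(), the init/main.c hunks above register two command-line hooks: early_param("pax_nouderef", ...), which runs very early to neuter UDEREF, and __setup("pax_softmode=", ...), which reads an integer with get_option(). The parsing half is plain string handling; a crude userspace analogue of what setup_pax_softmode() ends up doing (illustrative only, the real hook is registered through the kernel's __setup machinery):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* crude stand-in for the kernel's get_option() integer parsing */
static int parse_pax_softmode(const char *cmdline, unsigned int *value)
{
        const char *p = strstr(cmdline, "pax_softmode=");

        if (!p)
                return 0;
        *value = (unsigned int)strtoul(p + strlen("pax_softmode="), NULL, 10);
        return 1;
}

int main(void)
{
        unsigned int pax_softmode = 0;
        const char *cmdline = "root=/dev/sda1 pax_softmode=1 quiet";

        if (parse_pax_softmode(cmdline, &pax_softmode))
                printf("pax_softmode = %u\n", pax_softmode);
        return 0;
}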
58589 diff -urNp linux-2.6.32.42/init/noinitramfs.c linux-2.6.32.42/init/noinitramfs.c
58590 --- linux-2.6.32.42/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
58591 +++ linux-2.6.32.42/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
58592 @@ -29,7 +29,7 @@ static int __init default_rootfs(void)
58593 {
58594 int err;
58595
58596 - err = sys_mkdir("/dev", 0755);
58597 + err = sys_mkdir((const char __user *)"/dev", 0755);
58598 if (err < 0)
58599 goto out;
58600
58601 @@ -39,7 +39,7 @@ static int __init default_rootfs(void)
58602 if (err < 0)
58603 goto out;
58604
58605 - err = sys_mkdir("/root", 0700);
58606 + err = sys_mkdir((const char __user *)"/root", 0700);
58607 if (err < 0)
58608 goto out;
58609
58610 diff -urNp linux-2.6.32.42/ipc/mqueue.c linux-2.6.32.42/ipc/mqueue.c
58611 --- linux-2.6.32.42/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
58612 +++ linux-2.6.32.42/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
58613 @@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
58614 mq_bytes = (mq_msg_tblsz +
58615 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
58616
58617 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
58618 spin_lock(&mq_lock);
58619 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
58620 u->mq_bytes + mq_bytes >
58621 diff -urNp linux-2.6.32.42/ipc/sem.c linux-2.6.32.42/ipc/sem.c
58622 --- linux-2.6.32.42/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
58623 +++ linux-2.6.32.42/ipc/sem.c 2011-05-16 21:46:57.000000000 -0400
58624 @@ -671,6 +671,8 @@ static int semctl_main(struct ipc_namesp
58625 ushort* sem_io = fast_sem_io;
58626 int nsems;
58627
58628 + pax_track_stack();
58629 +
58630 sma = sem_lock_check(ns, semid);
58631 if (IS_ERR(sma))
58632 return PTR_ERR(sma);
58633 @@ -1071,6 +1073,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
58634 unsigned long jiffies_left = 0;
58635 struct ipc_namespace *ns;
58636
58637 + pax_track_stack();
58638 +
58639 ns = current->nsproxy->ipc_ns;
58640
58641 if (nsops < 1 || semid < 0)
58642 diff -urNp linux-2.6.32.42/ipc/shm.c linux-2.6.32.42/ipc/shm.c
58643 --- linux-2.6.32.42/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
58644 +++ linux-2.6.32.42/ipc/shm.c 2011-04-17 15:56:46.000000000 -0400
58645 @@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
58646 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
58647 #endif
58648
58649 +#ifdef CONFIG_GRKERNSEC
58650 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58651 + const time_t shm_createtime, const uid_t cuid,
58652 + const int shmid);
58653 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
58654 + const time_t shm_createtime);
58655 +#endif
58656 +
58657 void shm_init_ns(struct ipc_namespace *ns)
58658 {
58659 ns->shm_ctlmax = SHMMAX;
58660 @@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
58661 shp->shm_lprid = 0;
58662 shp->shm_atim = shp->shm_dtim = 0;
58663 shp->shm_ctim = get_seconds();
58664 +#ifdef CONFIG_GRKERNSEC
58665 + {
58666 + struct timespec timeval;
58667 + do_posix_clock_monotonic_gettime(&timeval);
58668 +
58669 + shp->shm_createtime = timeval.tv_sec;
58670 + }
58671 +#endif
58672 shp->shm_segsz = size;
58673 shp->shm_nattch = 0;
58674 shp->shm_file = file;
58675 @@ -880,9 +896,21 @@ long do_shmat(int shmid, char __user *sh
58676 if (err)
58677 goto out_unlock;
58678
58679 +#ifdef CONFIG_GRKERNSEC
58680 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
58681 + shp->shm_perm.cuid, shmid) ||
58682 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
58683 + err = -EACCES;
58684 + goto out_unlock;
58685 + }
58686 +#endif
58687 +
58688 path.dentry = dget(shp->shm_file->f_path.dentry);
58689 path.mnt = shp->shm_file->f_path.mnt;
58690 shp->shm_nattch++;
58691 +#ifdef CONFIG_GRKERNSEC
58692 + shp->shm_lapid = current->pid;
58693 +#endif
58694 size = i_size_read(path.dentry->d_inode);
58695 shm_unlock(shp);
58696
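newseg() now stamps every segment with a creation time taken from the monotonic clock, and do_shmat() passes that stamp plus the creator/last-attach PIDs and creator UID to the grsecurity hooks before the attach is allowed; using the monotonic clock means the check cannot be sidestepped by setting the wall clock back. A userspace analogue of taking such a stamp (struct and field names are illustrative):

#include <stdio.h>
#include <time.h>

struct seg_info {
        time_t createtime;      /* seconds from the monotonic clock */
};

int main(void)
{
        struct timespec ts;
        struct seg_info shp;

        clock_gettime(CLOCK_MONOTONIC, &ts);    /* unaffected by settimeofday() */
        shp.createtime = ts.tv_sec;

        printf("segment stamped at %ld s of monotonic time\n", (long)shp.createtime);
        return 0;
}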
58697 diff -urNp linux-2.6.32.42/kernel/acct.c linux-2.6.32.42/kernel/acct.c
58698 --- linux-2.6.32.42/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
58699 +++ linux-2.6.32.42/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
58700 @@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
58701 */
58702 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
58703 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
58704 - file->f_op->write(file, (char *)&ac,
58705 + file->f_op->write(file, (__force char __user *)&ac,
58706 sizeof(acct_t), &file->f_pos);
58707 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
58708 set_fs(fs);
58709 diff -urNp linux-2.6.32.42/kernel/audit.c linux-2.6.32.42/kernel/audit.c
58710 --- linux-2.6.32.42/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
58711 +++ linux-2.6.32.42/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
58712 @@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
58713 3) suppressed due to audit_rate_limit
58714 4) suppressed due to audit_backlog_limit
58715 */
58716 -static atomic_t audit_lost = ATOMIC_INIT(0);
58717 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
58718
58719 /* The netlink socket. */
58720 static struct sock *audit_sock;
58721 @@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
58722 unsigned long now;
58723 int print;
58724
58725 - atomic_inc(&audit_lost);
58726 + atomic_inc_unchecked(&audit_lost);
58727
58728 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
58729
58730 @@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
58731 printk(KERN_WARNING
58732 "audit: audit_lost=%d audit_rate_limit=%d "
58733 "audit_backlog_limit=%d\n",
58734 - atomic_read(&audit_lost),
58735 + atomic_read_unchecked(&audit_lost),
58736 audit_rate_limit,
58737 audit_backlog_limit);
58738 audit_panic(message);
58739 @@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
58740 status_set.pid = audit_pid;
58741 status_set.rate_limit = audit_rate_limit;
58742 status_set.backlog_limit = audit_backlog_limit;
58743 - status_set.lost = atomic_read(&audit_lost);
58744 + status_set.lost = atomic_read_unchecked(&audit_lost);
58745 status_set.backlog = skb_queue_len(&audit_skb_queue);
58746 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
58747 &status_set, sizeof(status_set));
58748 @@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
58749 spin_unlock_irq(&tsk->sighand->siglock);
58750 }
58751 read_unlock(&tasklist_lock);
58752 - audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
58753 - &s, sizeof(s));
58754 +
58755 + if (!err)
58756 + audit_send_reply(NETLINK_CB(skb).pid, seq,
58757 + AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
58758 break;
58759 }
58760 case AUDIT_TTY_SET: {
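audit_lost above, like session_id, the lockdep statistics and the kgdb flags later in the patch, moves from atomic_t to atomic_unchecked_t. Under PaX's REFCOUNT hardening, ordinary atomic_t operations gain overflow detection, so counters that are allowed to wrap are switched to the _unchecked variant to keep them from tripping it. Roughly, the unchecked flavour behaves like this standalone approximation (the real definitions live in the arch atomic headers elsewhere in the patch; a GCC builtin is used here for brevity):

#include <stdio.h>

typedef struct { volatile int counter; } atomic_unchecked_t;

/* unchecked flavour: plain wrapping increment, no overflow detection */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        __sync_fetch_and_add(&v->counter, 1);
}

static int atomic_read_unchecked(const atomic_unchecked_t *v)
{
        return v->counter;
}

int main(void)
{
        atomic_unchecked_t audit_lost = { 0 };

        atomic_inc_unchecked(&audit_lost);
        printf("audit_lost = %d\n", atomic_read_unchecked(&audit_lost));
        return 0;
}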
58761 diff -urNp linux-2.6.32.42/kernel/auditsc.c linux-2.6.32.42/kernel/auditsc.c
58762 --- linux-2.6.32.42/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
58763 +++ linux-2.6.32.42/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
58764 @@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
58765 }
58766
58767 /* global counter which is incremented every time something logs in */
58768 -static atomic_t session_id = ATOMIC_INIT(0);
58769 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
58770
58771 /**
58772 * audit_set_loginuid - set a task's audit_context loginuid
58773 @@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
58774 */
58775 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
58776 {
58777 - unsigned int sessionid = atomic_inc_return(&session_id);
58778 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
58779 struct audit_context *context = task->audit_context;
58780
58781 if (context && context->in_syscall) {
58782 diff -urNp linux-2.6.32.42/kernel/capability.c linux-2.6.32.42/kernel/capability.c
58783 --- linux-2.6.32.42/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
58784 +++ linux-2.6.32.42/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
58785 @@ -305,10 +305,26 @@ int capable(int cap)
58786 BUG();
58787 }
58788
58789 - if (security_capable(cap) == 0) {
58790 + if (security_capable(cap) == 0 && gr_is_capable(cap)) {
58791 current->flags |= PF_SUPERPRIV;
58792 return 1;
58793 }
58794 return 0;
58795 }
58796 +
58797 +int capable_nolog(int cap)
58798 +{
58799 + if (unlikely(!cap_valid(cap))) {
58800 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
58801 + BUG();
58802 + }
58803 +
58804 + if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
58805 + current->flags |= PF_SUPERPRIV;
58806 + return 1;
58807 + }
58808 + return 0;
58809 +}
58810 +
58811 EXPORT_SYMBOL(capable);
58812 +EXPORT_SYMBOL(capable_nolog);
58813 diff -urNp linux-2.6.32.42/kernel/cgroup.c linux-2.6.32.42/kernel/cgroup.c
58814 --- linux-2.6.32.42/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
58815 +++ linux-2.6.32.42/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
58816 @@ -536,6 +536,8 @@ static struct css_set *find_css_set(
58817 struct hlist_head *hhead;
58818 struct cg_cgroup_link *link;
58819
58820 + pax_track_stack();
58821 +
58822 /* First see if we already have a cgroup group that matches
58823 * the desired set */
58824 read_lock(&css_set_lock);
58825 diff -urNp linux-2.6.32.42/kernel/configs.c linux-2.6.32.42/kernel/configs.c
58826 --- linux-2.6.32.42/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
58827 +++ linux-2.6.32.42/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
58828 @@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
58829 struct proc_dir_entry *entry;
58830
58831 /* create the current config file */
58832 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
58833 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
58834 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
58835 + &ikconfig_file_ops);
58836 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58837 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
58838 + &ikconfig_file_ops);
58839 +#endif
58840 +#else
58841 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
58842 &ikconfig_file_ops);
58843 +#endif
58844 +
58845 if (!entry)
58846 return -ENOMEM;
58847
58848 diff -urNp linux-2.6.32.42/kernel/cpu.c linux-2.6.32.42/kernel/cpu.c
58849 --- linux-2.6.32.42/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
58850 +++ linux-2.6.32.42/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
58851 @@ -19,7 +19,7 @@
58852 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
58853 static DEFINE_MUTEX(cpu_add_remove_lock);
58854
58855 -static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
58856 +static RAW_NOTIFIER_HEAD(cpu_chain);
58857
58858 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
58859 * Should always be manipulated under cpu_add_remove_lock
58860 diff -urNp linux-2.6.32.42/kernel/cred.c linux-2.6.32.42/kernel/cred.c
58861 --- linux-2.6.32.42/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
58862 +++ linux-2.6.32.42/kernel/cred.c 2011-05-17 19:26:34.000000000 -0400
58863 @@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
58864 */
58865 void __put_cred(struct cred *cred)
58866 {
58867 + pax_track_stack();
58868 +
58869 kdebug("__put_cred(%p{%d,%d})", cred,
58870 atomic_read(&cred->usage),
58871 read_cred_subscribers(cred));
58872 @@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
58873 {
58874 struct cred *cred;
58875
58876 + pax_track_stack();
58877 +
58878 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
58879 atomic_read(&tsk->cred->usage),
58880 read_cred_subscribers(tsk->cred));
58881 @@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
58882 {
58883 const struct cred *cred;
58884
58885 + pax_track_stack();
58886 +
58887 rcu_read_lock();
58888
58889 do {
58890 @@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
58891 {
58892 struct cred *new;
58893
58894 + pax_track_stack();
58895 +
58896 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
58897 if (!new)
58898 return NULL;
58899 @@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
58900 const struct cred *old;
58901 struct cred *new;
58902
58903 + pax_track_stack();
58904 +
58905 validate_process_creds();
58906
58907 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
58908 @@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
58909 struct thread_group_cred *tgcred = NULL;
58910 struct cred *new;
58911
58912 + pax_track_stack();
58913 +
58914 #ifdef CONFIG_KEYS
58915 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
58916 if (!tgcred)
58917 @@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
58918 struct cred *new;
58919 int ret;
58920
58921 + pax_track_stack();
58922 +
58923 mutex_init(&p->cred_guard_mutex);
58924
58925 if (
58926 @@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
58927 struct task_struct *task = current;
58928 const struct cred *old = task->real_cred;
58929
58930 + pax_track_stack();
58931 +
58932 kdebug("commit_creds(%p{%d,%d})", new,
58933 atomic_read(&new->usage),
58934 read_cred_subscribers(new));
58935 @@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
58936
58937 get_cred(new); /* we will require a ref for the subj creds too */
58938
58939 + gr_set_role_label(task, new->uid, new->gid);
58940 +
58941 /* dumpability changes */
58942 if (old->euid != new->euid ||
58943 old->egid != new->egid ||
58944 @@ -606,6 +624,8 @@ EXPORT_SYMBOL(commit_creds);
58945 */
58946 void abort_creds(struct cred *new)
58947 {
58948 + pax_track_stack();
58949 +
58950 kdebug("abort_creds(%p{%d,%d})", new,
58951 atomic_read(&new->usage),
58952 read_cred_subscribers(new));
58953 @@ -629,6 +649,8 @@ const struct cred *override_creds(const
58954 {
58955 const struct cred *old = current->cred;
58956
58957 + pax_track_stack();
58958 +
58959 kdebug("override_creds(%p{%d,%d})", new,
58960 atomic_read(&new->usage),
58961 read_cred_subscribers(new));
58962 @@ -658,6 +680,8 @@ void revert_creds(const struct cred *old
58963 {
58964 const struct cred *override = current->cred;
58965
58966 + pax_track_stack();
58967 +
58968 kdebug("revert_creds(%p{%d,%d})", old,
58969 atomic_read(&old->usage),
58970 read_cred_subscribers(old));
58971 @@ -704,6 +728,8 @@ struct cred *prepare_kernel_cred(struct
58972 const struct cred *old;
58973 struct cred *new;
58974
58975 + pax_track_stack();
58976 +
58977 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
58978 if (!new)
58979 return NULL;
58980 @@ -758,6 +784,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
58981 */
58982 int set_security_override(struct cred *new, u32 secid)
58983 {
58984 + pax_track_stack();
58985 +
58986 return security_kernel_act_as(new, secid);
58987 }
58988 EXPORT_SYMBOL(set_security_override);
58989 @@ -777,6 +805,8 @@ int set_security_override_from_ctx(struc
58990 u32 secid;
58991 int ret;
58992
58993 + pax_track_stack();
58994 +
58995 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
58996 if (ret < 0)
58997 return ret;
58998 diff -urNp linux-2.6.32.42/kernel/exit.c linux-2.6.32.42/kernel/exit.c
58999 --- linux-2.6.32.42/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
59000 +++ linux-2.6.32.42/kernel/exit.c 2011-04-17 15:56:46.000000000 -0400
59001 @@ -55,6 +55,10 @@
59002 #include <asm/pgtable.h>
59003 #include <asm/mmu_context.h>
59004
59005 +#ifdef CONFIG_GRKERNSEC
59006 +extern rwlock_t grsec_exec_file_lock;
59007 +#endif
59008 +
59009 static void exit_mm(struct task_struct * tsk);
59010
59011 static void __unhash_process(struct task_struct *p)
59012 @@ -174,6 +178,8 @@ void release_task(struct task_struct * p
59013 struct task_struct *leader;
59014 int zap_leader;
59015 repeat:
59016 + gr_del_task_from_ip_table(p);
59017 +
59018 tracehook_prepare_release_task(p);
59019 /* don't need to get the RCU readlock here - the process is dead and
59020 * can't be modifying its own credentials */
59021 @@ -341,11 +347,22 @@ static void reparent_to_kthreadd(void)
59022 {
59023 write_lock_irq(&tasklist_lock);
59024
59025 +#ifdef CONFIG_GRKERNSEC
59026 + write_lock(&grsec_exec_file_lock);
59027 + if (current->exec_file) {
59028 + fput(current->exec_file);
59029 + current->exec_file = NULL;
59030 + }
59031 + write_unlock(&grsec_exec_file_lock);
59032 +#endif
59033 +
59034 ptrace_unlink(current);
59035 /* Reparent to init */
59036 current->real_parent = current->parent = kthreadd_task;
59037 list_move_tail(&current->sibling, &current->real_parent->children);
59038
59039 + gr_set_kernel_label(current);
59040 +
59041 /* Set the exit signal to SIGCHLD so we signal init on exit */
59042 current->exit_signal = SIGCHLD;
59043
59044 @@ -397,7 +414,7 @@ int allow_signal(int sig)
59045 * know it'll be handled, so that they don't get converted to
59046 * SIGKILL or just silently dropped.
59047 */
59048 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
59049 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
59050 recalc_sigpending();
59051 spin_unlock_irq(&current->sighand->siglock);
59052 return 0;
59053 @@ -433,6 +450,17 @@ void daemonize(const char *name, ...)
59054 vsnprintf(current->comm, sizeof(current->comm), name, args);
59055 va_end(args);
59056
59057 +#ifdef CONFIG_GRKERNSEC
59058 + write_lock(&grsec_exec_file_lock);
59059 + if (current->exec_file) {
59060 + fput(current->exec_file);
59061 + current->exec_file = NULL;
59062 + }
59063 + write_unlock(&grsec_exec_file_lock);
59064 +#endif
59065 +
59066 + gr_set_kernel_label(current);
59067 +
59068 /*
59069 * If we were started as result of loading a module, close all of the
59070 * user space pages. We don't need them, and if we didn't close them
59071 @@ -897,17 +925,17 @@ NORET_TYPE void do_exit(long code)
59072 struct task_struct *tsk = current;
59073 int group_dead;
59074
59075 - profile_task_exit(tsk);
59076 -
59077 - WARN_ON(atomic_read(&tsk->fs_excl));
59078 -
59079 + /*
59080 + * Check this first since set_fs() below depends on
59081 + * current_thread_info(), which we better not access when we're in
59082 + * interrupt context. Other than that, we want to do the set_fs()
59083 + * as early as possible.
59084 + */
59085 if (unlikely(in_interrupt()))
59086 panic("Aiee, killing interrupt handler!");
59087 - if (unlikely(!tsk->pid))
59088 - panic("Attempted to kill the idle task!");
59089
59090 /*
59091 - * If do_exit is called because this processes oopsed, it's possible
59092 + * If do_exit is called because this processes Oops'ed, it's possible
59093 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
59094 * continuing. Amongst other possible reasons, this is to prevent
59095 * mm_release()->clear_child_tid() from writing to a user-controlled
59096 @@ -915,6 +943,13 @@ NORET_TYPE void do_exit(long code)
59097 */
59098 set_fs(USER_DS);
59099
59100 + profile_task_exit(tsk);
59101 +
59102 + WARN_ON(atomic_read(&tsk->fs_excl));
59103 +
59104 + if (unlikely(!tsk->pid))
59105 + panic("Attempted to kill the idle task!");
59106 +
59107 tracehook_report_exit(&code);
59108
59109 validate_creds_for_do_exit(tsk);
59110 @@ -973,6 +1008,9 @@ NORET_TYPE void do_exit(long code)
59111 tsk->exit_code = code;
59112 taskstats_exit(tsk, group_dead);
59113
59114 + gr_acl_handle_psacct(tsk, code);
59115 + gr_acl_handle_exit();
59116 +
59117 exit_mm(tsk);
59118
59119 if (group_dead)
59120 @@ -1188,7 +1226,7 @@ static int wait_task_zombie(struct wait_
59121
59122 if (unlikely(wo->wo_flags & WNOWAIT)) {
59123 int exit_code = p->exit_code;
59124 - int why, status;
59125 + int why;
59126
59127 get_task_struct(p);
59128 read_unlock(&tasklist_lock);
59129 diff -urNp linux-2.6.32.42/kernel/fork.c linux-2.6.32.42/kernel/fork.c
59130 --- linux-2.6.32.42/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
59131 +++ linux-2.6.32.42/kernel/fork.c 2011-04-17 15:56:46.000000000 -0400
59132 @@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
59133 *stackend = STACK_END_MAGIC; /* for overflow detection */
59134
59135 #ifdef CONFIG_CC_STACKPROTECTOR
59136 - tsk->stack_canary = get_random_int();
59137 + tsk->stack_canary = pax_get_random_long();
59138 #endif
59139
59140 /* One for us, one for whoever does the "release_task()" (usually parent) */
59141 @@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
59142 mm->locked_vm = 0;
59143 mm->mmap = NULL;
59144 mm->mmap_cache = NULL;
59145 - mm->free_area_cache = oldmm->mmap_base;
59146 - mm->cached_hole_size = ~0UL;
59147 + mm->free_area_cache = oldmm->free_area_cache;
59148 + mm->cached_hole_size = oldmm->cached_hole_size;
59149 mm->map_count = 0;
59150 cpumask_clear(mm_cpumask(mm));
59151 mm->mm_rb = RB_ROOT;
59152 @@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
59153 tmp->vm_flags &= ~VM_LOCKED;
59154 tmp->vm_mm = mm;
59155 tmp->vm_next = tmp->vm_prev = NULL;
59156 + tmp->vm_mirror = NULL;
59157 anon_vma_link(tmp);
59158 file = tmp->vm_file;
59159 if (file) {
59160 @@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
59161 if (retval)
59162 goto out;
59163 }
59164 +
59165 +#ifdef CONFIG_PAX_SEGMEXEC
59166 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
59167 + struct vm_area_struct *mpnt_m;
59168 +
59169 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
59170 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
59171 +
59172 + if (!mpnt->vm_mirror)
59173 + continue;
59174 +
59175 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
59176 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
59177 + mpnt->vm_mirror = mpnt_m;
59178 + } else {
59179 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
59180 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
59181 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
59182 + mpnt->vm_mirror->vm_mirror = mpnt;
59183 + }
59184 + }
59185 + BUG_ON(mpnt_m);
59186 + }
59187 +#endif
59188 +
59189 /* a new mm has just been created */
59190 arch_dup_mmap(oldmm, mm);
59191 retval = 0;
59192 @@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
59193 write_unlock(&fs->lock);
59194 return -EAGAIN;
59195 }
59196 - fs->users++;
59197 + atomic_inc(&fs->users);
59198 write_unlock(&fs->lock);
59199 return 0;
59200 }
59201 tsk->fs = copy_fs_struct(fs);
59202 if (!tsk->fs)
59203 return -ENOMEM;
59204 + gr_set_chroot_entries(tsk, &tsk->fs->root);
59205 return 0;
59206 }
59207
59208 @@ -1033,10 +1060,13 @@ static struct task_struct *copy_process(
59209 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
59210 #endif
59211 retval = -EAGAIN;
59212 +
59213 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
59214 +
59215 if (atomic_read(&p->real_cred->user->processes) >=
59216 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
59217 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
59218 - p->real_cred->user != INIT_USER)
59219 + if (p->real_cred->user != INIT_USER &&
59220 + !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
59221 goto bad_fork_free;
59222 }
59223
59224 @@ -1183,6 +1213,8 @@ static struct task_struct *copy_process(
59225 goto bad_fork_free_pid;
59226 }
59227
59228 + gr_copy_label(p);
59229 +
59230 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
59231 /*
59232 * Clear TID on mm_release()?
59233 @@ -1333,6 +1365,8 @@ bad_fork_cleanup_count:
59234 bad_fork_free:
59235 free_task(p);
59236 fork_out:
59237 + gr_log_forkfail(retval);
59238 +
59239 return ERR_PTR(retval);
59240 }
59241
59242 @@ -1426,6 +1460,8 @@ long do_fork(unsigned long clone_flags,
59243 if (clone_flags & CLONE_PARENT_SETTID)
59244 put_user(nr, parent_tidptr);
59245
59246 + gr_handle_brute_check();
59247 +
59248 if (clone_flags & CLONE_VFORK) {
59249 p->vfork_done = &vfork;
59250 init_completion(&vfork);
59251 @@ -1558,7 +1594,7 @@ static int unshare_fs(unsigned long unsh
59252 return 0;
59253
59254 /* don't need lock here; in the worst case we'll do useless copy */
59255 - if (fs->users == 1)
59256 + if (atomic_read(&fs->users) == 1)
59257 return 0;
59258
59259 *new_fsp = copy_fs_struct(fs);
59260 @@ -1681,7 +1717,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
59261 fs = current->fs;
59262 write_lock(&fs->lock);
59263 current->fs = new_fs;
59264 - if (--fs->users)
59265 + gr_set_chroot_entries(current, &current->fs->root);
59266 + if (atomic_dec_return(&fs->users))
59267 new_fs = NULL;
59268 else
59269 new_fs = fs;
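copy_fs() and the unshare path stop treating fs->users as a plain integer and use atomic_inc()/atomic_read()/atomic_dec_return() instead, so the reference count stays coherent for the added gr_set_chroot_entries() bookkeeping without leaning on fs->lock for every count change. The underlying pattern, taking a reference atomically and acting on the post-decrement value when dropping it, looks like this in standalone C11 (names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct fs_struct {
        atomic_int users;
};

static void get_fs_ref(struct fs_struct *fs)
{
        atomic_fetch_add(&fs->users, 1);
}

/* returns how many users remain, mirroring atomic_dec_return() */
static int put_fs_ref(struct fs_struct *fs)
{
        return atomic_fetch_sub(&fs->users, 1) - 1;
}

int main(void)
{
        struct fs_struct fs = { .users = 1 };

        get_fs_ref(&fs);                               /* copy_fs() taking a reference */
        printf("%d user(s) left\n", put_fs_ref(&fs));  /* unshare dropping one */
        return 0;
}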
59270 diff -urNp linux-2.6.32.42/kernel/futex.c linux-2.6.32.42/kernel/futex.c
59271 --- linux-2.6.32.42/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
59272 +++ linux-2.6.32.42/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
59273 @@ -54,6 +54,7 @@
59274 #include <linux/mount.h>
59275 #include <linux/pagemap.h>
59276 #include <linux/syscalls.h>
59277 +#include <linux/ptrace.h>
59278 #include <linux/signal.h>
59279 #include <linux/module.h>
59280 #include <linux/magic.h>
59281 @@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
59282 struct page *page;
59283 int err;
59284
59285 +#ifdef CONFIG_PAX_SEGMEXEC
59286 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
59287 + return -EFAULT;
59288 +#endif
59289 +
59290 /*
59291 * The futex address must be "naturally" aligned.
59292 */
59293 @@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
59294 struct futex_q q;
59295 int ret;
59296
59297 + pax_track_stack();
59298 +
59299 if (!bitset)
59300 return -EINVAL;
59301
59302 @@ -1841,7 +1849,7 @@ retry:
59303
59304 restart = &current_thread_info()->restart_block;
59305 restart->fn = futex_wait_restart;
59306 - restart->futex.uaddr = (u32 *)uaddr;
59307 + restart->futex.uaddr = uaddr;
59308 restart->futex.val = val;
59309 restart->futex.time = abs_time->tv64;
59310 restart->futex.bitset = bitset;
59311 @@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
59312 struct futex_q q;
59313 int res, ret;
59314
59315 + pax_track_stack();
59316 +
59317 if (!bitset)
59318 return -EINVAL;
59319
59320 @@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59321 {
59322 struct robust_list_head __user *head;
59323 unsigned long ret;
59324 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59325 const struct cred *cred = current_cred(), *pcred;
59326 +#endif
59327
59328 if (!futex_cmpxchg_enabled)
59329 return -ENOSYS;
59330 @@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
59331 if (!p)
59332 goto err_unlock;
59333 ret = -EPERM;
59334 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59335 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
59336 + goto err_unlock;
59337 +#else
59338 pcred = __task_cred(p);
59339 if (cred->euid != pcred->euid &&
59340 cred->euid != pcred->uid &&
59341 !capable(CAP_SYS_PTRACE))
59342 goto err_unlock;
59343 +#endif
59344 head = p->robust_list;
59345 rcu_read_unlock();
59346 }
59347 @@ -2459,7 +2476,7 @@ retry:
59348 */
59349 static inline int fetch_robust_entry(struct robust_list __user **entry,
59350 struct robust_list __user * __user *head,
59351 - int *pi)
59352 + unsigned int *pi)
59353 {
59354 unsigned long uentry;
59355
59356 @@ -2640,6 +2657,7 @@ static int __init futex_init(void)
59357 {
59358 u32 curval;
59359 int i;
59360 + mm_segment_t oldfs;
59361
59362 /*
59363 * This will fail and we want it. Some arch implementations do
59364 @@ -2651,7 +2669,10 @@ static int __init futex_init(void)
59365 * implementation, the non functional ones will return
59366 * -ENOSYS.
59367 */
59368 + oldfs = get_fs();
59369 + set_fs(USER_DS);
59370 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
59371 + set_fs(oldfs);
59372 if (curval == -EFAULT)
59373 futex_cmpxchg_enabled = 1;
59374
59375 diff -urNp linux-2.6.32.42/kernel/futex_compat.c linux-2.6.32.42/kernel/futex_compat.c
59376 --- linux-2.6.32.42/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
59377 +++ linux-2.6.32.42/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
59378 @@ -10,6 +10,7 @@
59379 #include <linux/compat.h>
59380 #include <linux/nsproxy.h>
59381 #include <linux/futex.h>
59382 +#include <linux/ptrace.h>
59383
59384 #include <asm/uaccess.h>
59385
59386 @@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
59387 {
59388 struct compat_robust_list_head __user *head;
59389 unsigned long ret;
59390 - const struct cred *cred = current_cred(), *pcred;
59391 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
59392 + const struct cred *cred = current_cred();
59393 + const struct cred *pcred;
59394 +#endif
59395
59396 if (!futex_cmpxchg_enabled)
59397 return -ENOSYS;
59398 @@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
59399 if (!p)
59400 goto err_unlock;
59401 ret = -EPERM;
59402 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
59403 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
59404 + goto err_unlock;
59405 +#else
59406 pcred = __task_cred(p);
59407 if (cred->euid != pcred->euid &&
59408 cred->euid != pcred->uid &&
59409 !capable(CAP_SYS_PTRACE))
59410 goto err_unlock;
59411 +#endif
59412 head = p->compat_robust_list;
59413 read_unlock(&tasklist_lock);
59414 }
59415 diff -urNp linux-2.6.32.42/kernel/gcov/base.c linux-2.6.32.42/kernel/gcov/base.c
59416 --- linux-2.6.32.42/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
59417 +++ linux-2.6.32.42/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
59418 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
59419 }
59420
59421 #ifdef CONFIG_MODULES
59422 -static inline int within(void *addr, void *start, unsigned long size)
59423 -{
59424 - return ((addr >= start) && (addr < start + size));
59425 -}
59426 -
59427 /* Update list and generate events when modules are unloaded. */
59428 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
59429 void *data)
59430 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
59431 prev = NULL;
59432 /* Remove entries located in module from linked list. */
59433 for (info = gcov_info_head; info; info = info->next) {
59434 - if (within(info, mod->module_core, mod->core_size)) {
59435 + if (within_module_core_rw((unsigned long)info, mod)) {
59436 if (prev)
59437 prev->next = info->next;
59438 else
59439 diff -urNp linux-2.6.32.42/kernel/hrtimer.c linux-2.6.32.42/kernel/hrtimer.c
59440 --- linux-2.6.32.42/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
59441 +++ linux-2.6.32.42/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
59442 @@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
59443 local_irq_restore(flags);
59444 }
59445
59446 -static void run_hrtimer_softirq(struct softirq_action *h)
59447 +static void run_hrtimer_softirq(void)
59448 {
59449 hrtimer_peek_ahead_timers();
59450 }
59451 diff -urNp linux-2.6.32.42/kernel/kallsyms.c linux-2.6.32.42/kernel/kallsyms.c
59452 --- linux-2.6.32.42/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
59453 +++ linux-2.6.32.42/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
59454 @@ -11,6 +11,9 @@
59455 * Changed the compression method from stem compression to "table lookup"
59456 * compression (see scripts/kallsyms.c for a more complete description)
59457 */
59458 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59459 +#define __INCLUDED_BY_HIDESYM 1
59460 +#endif
59461 #include <linux/kallsyms.h>
59462 #include <linux/module.h>
59463 #include <linux/init.h>
59464 @@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
59465
59466 static inline int is_kernel_inittext(unsigned long addr)
59467 {
59468 + if (system_state != SYSTEM_BOOTING)
59469 + return 0;
59470 +
59471 if (addr >= (unsigned long)_sinittext
59472 && addr <= (unsigned long)_einittext)
59473 return 1;
59474 return 0;
59475 }
59476
59477 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59478 +#ifdef CONFIG_MODULES
59479 +static inline int is_module_text(unsigned long addr)
59480 +{
59481 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
59482 + return 1;
59483 +
59484 + addr = ktla_ktva(addr);
59485 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
59486 +}
59487 +#else
59488 +static inline int is_module_text(unsigned long addr)
59489 +{
59490 + return 0;
59491 +}
59492 +#endif
59493 +#endif
59494 +
59495 static inline int is_kernel_text(unsigned long addr)
59496 {
59497 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
59498 @@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
59499
59500 static inline int is_kernel(unsigned long addr)
59501 {
59502 +
59503 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59504 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
59505 + return 1;
59506 +
59507 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
59508 +#else
59509 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
59510 +#endif
59511 +
59512 return 1;
59513 return in_gate_area_no_task(addr);
59514 }
59515
59516 static int is_ksym_addr(unsigned long addr)
59517 {
59518 +
59519 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
59520 + if (is_module_text(addr))
59521 + return 0;
59522 +#endif
59523 +
59524 if (all_var)
59525 return is_kernel(addr);
59526
59527 @@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
59528
59529 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
59530 {
59531 - iter->name[0] = '\0';
59532 iter->nameoff = get_symbol_offset(new_pos);
59533 iter->pos = new_pos;
59534 }
59535 @@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
59536 {
59537 struct kallsym_iter *iter = m->private;
59538
59539 +#ifdef CONFIG_GRKERNSEC_HIDESYM
59540 + if (current_uid())
59541 + return 0;
59542 +#endif
59543 +
59544 /* Some debugging symbols have no name. Ignore them. */
59545 if (!iter->name[0])
59546 return 0;
59547 @@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
59548 struct kallsym_iter *iter;
59549 int ret;
59550
59551 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
59552 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
59553 if (!iter)
59554 return -ENOMEM;
59555 reset_iter(iter, 0);
59556 diff -urNp linux-2.6.32.42/kernel/kgdb.c linux-2.6.32.42/kernel/kgdb.c
59557 --- linux-2.6.32.42/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
59558 +++ linux-2.6.32.42/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
59559 @@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
59560 /* Guard for recursive entry */
59561 static int exception_level;
59562
59563 -static struct kgdb_io *kgdb_io_ops;
59564 +static const struct kgdb_io *kgdb_io_ops;
59565 static DEFINE_SPINLOCK(kgdb_registration_lock);
59566
59567 /* kgdb console driver is loaded */
59568 @@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
59569 */
59570 static atomic_t passive_cpu_wait[NR_CPUS];
59571 static atomic_t cpu_in_kgdb[NR_CPUS];
59572 -atomic_t kgdb_setting_breakpoint;
59573 +atomic_unchecked_t kgdb_setting_breakpoint;
59574
59575 struct task_struct *kgdb_usethread;
59576 struct task_struct *kgdb_contthread;
59577 @@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
59578 sizeof(unsigned long)];
59579
59580 /* to keep track of the CPU which is doing the single stepping*/
59581 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59582 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
59583
59584 /*
59585 * If you are debugging a problem where roundup (the collection of
59586 @@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
59587 return 0;
59588 if (kgdb_connected)
59589 return 1;
59590 - if (atomic_read(&kgdb_setting_breakpoint))
59591 + if (atomic_read_unchecked(&kgdb_setting_breakpoint))
59592 return 1;
59593 if (print_wait)
59594 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
59595 @@ -1426,8 +1426,8 @@ acquirelock:
59596 * instance of the exception handler wanted to come into the
59597 * debugger on a different CPU via a single step
59598 */
59599 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
59600 - atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
59601 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
59602 + atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
59603
59604 atomic_set(&kgdb_active, -1);
59605 touch_softlockup_watchdog();
59606 @@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
59607 *
59608 * Register it with the KGDB core.
59609 */
59610 -int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
59611 +int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
59612 {
59613 int err;
59614
59615 @@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
59616 *
59617 * Unregister it with the KGDB core.
59618 */
59619 -void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
59620 +void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
59621 {
59622 BUG_ON(kgdb_connected);
59623
59624 @@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
59625 */
59626 void kgdb_breakpoint(void)
59627 {
59628 - atomic_set(&kgdb_setting_breakpoint, 1);
59629 + atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
59630 wmb(); /* Sync point before breakpoint */
59631 arch_kgdb_breakpoint();
59632 wmb(); /* Sync point after breakpoint */
59633 - atomic_set(&kgdb_setting_breakpoint, 0);
59634 + atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
59635 }
59636 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
59637
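The kgdb changes combine the atomic_unchecked_t conversion with another recurring theme of the patch: the I/O method table and the pointers that reach it are constified (struct kgdb_io * becomes const struct kgdb_io *), so the ops table can sit in read-only memory and cannot be retargeted through a stray write. The shape of that idiom, reduced to standalone C (names are illustrative):

#include <stdio.h>

struct io_ops {
        void (*write_char)(char c);
};

static void serial_write_char(char c)
{
        putchar(c);
}

/* the ops table is const, so it can live in .rodata and its function
   pointers cannot be overwritten after initialisation */
static const struct io_ops serial_ops = {
        .write_char = serial_write_char,
};

/* consumers only ever hold a pointer-to-const, as kgdb_io_ops does now */
static const struct io_ops *registered_ops;

int main(void)
{
        registered_ops = &serial_ops;
        registered_ops->write_char('o');
        registered_ops->write_char('k');
        registered_ops->write_char('\n');
        return 0;
}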
59638 diff -urNp linux-2.6.32.42/kernel/kmod.c linux-2.6.32.42/kernel/kmod.c
59639 --- linux-2.6.32.42/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
59640 +++ linux-2.6.32.42/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
59641 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
59642 * If module auto-loading support is disabled then this function
59643 * becomes a no-operation.
59644 */
59645 -int __request_module(bool wait, const char *fmt, ...)
59646 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
59647 {
59648 - va_list args;
59649 char module_name[MODULE_NAME_LEN];
59650 unsigned int max_modprobes;
59651 int ret;
59652 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
59653 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
59654 static char *envp[] = { "HOME=/",
59655 "TERM=linux",
59656 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
59657 @@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
59658 if (ret)
59659 return ret;
59660
59661 - va_start(args, fmt);
59662 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
59663 - va_end(args);
59664 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
59665 if (ret >= MODULE_NAME_LEN)
59666 return -ENAMETOOLONG;
59667
59668 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59669 + if (!current_uid()) {
59670 + /* hack to workaround consolekit/udisks stupidity */
59671 + read_lock(&tasklist_lock);
59672 + if (!strcmp(current->comm, "mount") &&
59673 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
59674 + read_unlock(&tasklist_lock);
59675 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
59676 + return -EPERM;
59677 + }
59678 + read_unlock(&tasklist_lock);
59679 + }
59680 +#endif
59681 +
59682 /* If modprobe needs a service that is in a module, we get a recursive
59683 * loop. Limit the number of running kmod threads to max_threads/2 or
59684 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
59685 @@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
59686 atomic_dec(&kmod_concurrent);
59687 return ret;
59688 }
59689 +
59690 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
59691 +{
59692 + va_list args;
59693 + int ret;
59694 +
59695 + va_start(args, fmt);
59696 + ret = ____request_module(wait, module_param, fmt, args);
59697 + va_end(args);
59698 +
59699 + return ret;
59700 +}
59701 +
59702 +int __request_module(bool wait, const char *fmt, ...)
59703 +{
59704 + va_list args;
59705 + int ret;
59706 +
59707 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59708 + if (current_uid()) {
59709 + char module_param[MODULE_NAME_LEN];
59710 +
59711 + memset(module_param, 0, sizeof(module_param));
59712 +
59713 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
59714 +
59715 + va_start(args, fmt);
59716 + ret = ____request_module(wait, module_param, fmt, args);
59717 + va_end(args);
59718 +
59719 + return ret;
59720 + }
59721 +#endif
59722 +
59723 + va_start(args, fmt);
59724 + ret = ____request_module(wait, NULL, fmt, args);
59725 + va_end(args);
59726 +
59727 + return ret;
59728 +}
59729 +
59730 +
59731 EXPORT_SYMBOL(__request_module);
59732 #endif /* CONFIG_MODULES */
59733
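To be able to append the grsec_modharden marker as an extra modprobe argument, __request_module() is split into a va_list-taking core (____request_module) plus thin varargs wrappers, so the hardened and the plain path share the same name formatting and -ENAMETOOLONG length check. The forwarding pattern itself is ordinary C; a minimal standalone sketch (names and the printed command line are illustrative, not the kernel's):

#include <stdarg.h>
#include <stdio.h>

#define MODULE_NAME_LEN 64

/* core takes a va_list so several varargs front-ends can share it */
static int build_module_name(char *out, const char *fmt, va_list ap)
{
        int ret = vsnprintf(out, MODULE_NAME_LEN, fmt, ap);

        return (ret >= MODULE_NAME_LEN) ? -1 : 0;    /* -ENAMETOOLONG analogue */
}

static int request_module_sketch(const char *fmt, ...)
{
        char name[MODULE_NAME_LEN];
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = build_module_name(name, fmt, ap);
        va_end(ap);

        if (ret == 0)
                printf("would exec: /sbin/modprobe -q -- %s\n", name);
        return ret;
}

int main(void)
{
        return request_module_sketch("fs-%s", "ext4");
}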
59734 diff -urNp linux-2.6.32.42/kernel/kprobes.c linux-2.6.32.42/kernel/kprobes.c
59735 --- linux-2.6.32.42/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
59736 +++ linux-2.6.32.42/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
59737 @@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
59738 * kernel image and loaded module images reside. This is required
59739 * so x86_64 can correctly handle the %rip-relative fixups.
59740 */
59741 - kip->insns = module_alloc(PAGE_SIZE);
59742 + kip->insns = module_alloc_exec(PAGE_SIZE);
59743 if (!kip->insns) {
59744 kfree(kip);
59745 return NULL;
59746 @@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
59747 */
59748 if (!list_is_singular(&kprobe_insn_pages)) {
59749 list_del(&kip->list);
59750 - module_free(NULL, kip->insns);
59751 + module_free_exec(NULL, kip->insns);
59752 kfree(kip);
59753 }
59754 return 1;
59755 @@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
59756 {
59757 int i, err = 0;
59758 unsigned long offset = 0, size = 0;
59759 - char *modname, namebuf[128];
59760 + char *modname, namebuf[KSYM_NAME_LEN];
59761 const char *symbol_name;
59762 void *addr;
59763 struct kprobe_blackpoint *kb;
59764 @@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
59765 const char *sym = NULL;
59766 unsigned int i = *(loff_t *) v;
59767 unsigned long offset = 0;
59768 - char *modname, namebuf[128];
59769 + char *modname, namebuf[KSYM_NAME_LEN];
59770
59771 head = &kprobe_table[i];
59772 preempt_disable();
59773 diff -urNp linux-2.6.32.42/kernel/lockdep.c linux-2.6.32.42/kernel/lockdep.c
59774 --- linux-2.6.32.42/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
59775 +++ linux-2.6.32.42/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
59776 @@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
59777 /*
59778 * Various lockdep statistics:
59779 */
59780 -atomic_t chain_lookup_hits;
59781 -atomic_t chain_lookup_misses;
59782 -atomic_t hardirqs_on_events;
59783 -atomic_t hardirqs_off_events;
59784 -atomic_t redundant_hardirqs_on;
59785 -atomic_t redundant_hardirqs_off;
59786 -atomic_t softirqs_on_events;
59787 -atomic_t softirqs_off_events;
59788 -atomic_t redundant_softirqs_on;
59789 -atomic_t redundant_softirqs_off;
59790 -atomic_t nr_unused_locks;
59791 -atomic_t nr_cyclic_checks;
59792 -atomic_t nr_find_usage_forwards_checks;
59793 -atomic_t nr_find_usage_backwards_checks;
59794 +atomic_unchecked_t chain_lookup_hits;
59795 +atomic_unchecked_t chain_lookup_misses;
59796 +atomic_unchecked_t hardirqs_on_events;
59797 +atomic_unchecked_t hardirqs_off_events;
59798 +atomic_unchecked_t redundant_hardirqs_on;
59799 +atomic_unchecked_t redundant_hardirqs_off;
59800 +atomic_unchecked_t softirqs_on_events;
59801 +atomic_unchecked_t softirqs_off_events;
59802 +atomic_unchecked_t redundant_softirqs_on;
59803 +atomic_unchecked_t redundant_softirqs_off;
59804 +atomic_unchecked_t nr_unused_locks;
59805 +atomic_unchecked_t nr_cyclic_checks;
59806 +atomic_unchecked_t nr_find_usage_forwards_checks;
59807 +atomic_unchecked_t nr_find_usage_backwards_checks;
59808 #endif
59809
59810 /*
59811 @@ -577,6 +577,10 @@ static int static_obj(void *obj)
59812 int i;
59813 #endif
59814
59815 +#ifdef CONFIG_PAX_KERNEXEC
59816 + start = ktla_ktva(start);
59817 +#endif
59818 +
59819 /*
59820 * static variable?
59821 */
59822 @@ -592,8 +596,7 @@ static int static_obj(void *obj)
59823 */
59824 for_each_possible_cpu(i) {
59825 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
59826 - end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
59827 - + per_cpu_offset(i);
59828 + end = start + PERCPU_ENOUGH_ROOM;
59829
59830 if ((addr >= start) && (addr < end))
59831 return 1;
59832 @@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
59833 if (!static_obj(lock->key)) {
59834 debug_locks_off();
59835 printk("INFO: trying to register non-static key.\n");
59836 + printk("lock:%pS key:%pS.\n", lock, lock->key);
59837 printk("the code is fine but needs lockdep annotation.\n");
59838 printk("turning off the locking correctness validator.\n");
59839 dump_stack();
59840 @@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
59841 if (!class)
59842 return 0;
59843 }
59844 - debug_atomic_inc((atomic_t *)&class->ops);
59845 + debug_atomic_inc((atomic_unchecked_t *)&class->ops);
59846 if (very_verbose(class)) {
59847 printk("\nacquire class [%p] %s", class->key, class->name);
59848 if (class->name_version > 1)
59849 diff -urNp linux-2.6.32.42/kernel/lockdep_internals.h linux-2.6.32.42/kernel/lockdep_internals.h
59850 --- linux-2.6.32.42/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
59851 +++ linux-2.6.32.42/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
59852 @@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
59853 /*
59854 * Various lockdep statistics:
59855 */
59856 -extern atomic_t chain_lookup_hits;
59857 -extern atomic_t chain_lookup_misses;
59858 -extern atomic_t hardirqs_on_events;
59859 -extern atomic_t hardirqs_off_events;
59860 -extern atomic_t redundant_hardirqs_on;
59861 -extern atomic_t redundant_hardirqs_off;
59862 -extern atomic_t softirqs_on_events;
59863 -extern atomic_t softirqs_off_events;
59864 -extern atomic_t redundant_softirqs_on;
59865 -extern atomic_t redundant_softirqs_off;
59866 -extern atomic_t nr_unused_locks;
59867 -extern atomic_t nr_cyclic_checks;
59868 -extern atomic_t nr_cyclic_check_recursions;
59869 -extern atomic_t nr_find_usage_forwards_checks;
59870 -extern atomic_t nr_find_usage_forwards_recursions;
59871 -extern atomic_t nr_find_usage_backwards_checks;
59872 -extern atomic_t nr_find_usage_backwards_recursions;
59873 -# define debug_atomic_inc(ptr) atomic_inc(ptr)
59874 -# define debug_atomic_dec(ptr) atomic_dec(ptr)
59875 -# define debug_atomic_read(ptr) atomic_read(ptr)
59876 +extern atomic_unchecked_t chain_lookup_hits;
59877 +extern atomic_unchecked_t chain_lookup_misses;
59878 +extern atomic_unchecked_t hardirqs_on_events;
59879 +extern atomic_unchecked_t hardirqs_off_events;
59880 +extern atomic_unchecked_t redundant_hardirqs_on;
59881 +extern atomic_unchecked_t redundant_hardirqs_off;
59882 +extern atomic_unchecked_t softirqs_on_events;
59883 +extern atomic_unchecked_t softirqs_off_events;
59884 +extern atomic_unchecked_t redundant_softirqs_on;
59885 +extern atomic_unchecked_t redundant_softirqs_off;
59886 +extern atomic_unchecked_t nr_unused_locks;
59887 +extern atomic_unchecked_t nr_cyclic_checks;
59888 +extern atomic_unchecked_t nr_cyclic_check_recursions;
59889 +extern atomic_unchecked_t nr_find_usage_forwards_checks;
59890 +extern atomic_unchecked_t nr_find_usage_forwards_recursions;
59891 +extern atomic_unchecked_t nr_find_usage_backwards_checks;
59892 +extern atomic_unchecked_t nr_find_usage_backwards_recursions;
59893 +# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
59894 +# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
59895 +# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
59896 #else
59897 # define debug_atomic_inc(ptr) do { } while (0)
59898 # define debug_atomic_dec(ptr) do { } while (0)
59899 diff -urNp linux-2.6.32.42/kernel/lockdep_proc.c linux-2.6.32.42/kernel/lockdep_proc.c
59900 --- linux-2.6.32.42/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
59901 +++ linux-2.6.32.42/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
59902 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
59903
59904 static void print_name(struct seq_file *m, struct lock_class *class)
59905 {
59906 - char str[128];
59907 + char str[KSYM_NAME_LEN];
59908 const char *name = class->name;
59909
59910 if (!name) {
59911 diff -urNp linux-2.6.32.42/kernel/module.c linux-2.6.32.42/kernel/module.c
59912 --- linux-2.6.32.42/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
59913 +++ linux-2.6.32.42/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
59914 @@ -55,6 +55,7 @@
59915 #include <linux/async.h>
59916 #include <linux/percpu.h>
59917 #include <linux/kmemleak.h>
59918 +#include <linux/grsecurity.h>
59919
59920 #define CREATE_TRACE_POINTS
59921 #include <trace/events/module.h>
59922 @@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
59923 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
59924
59925 /* Bounds of module allocation, for speeding __module_address */
59926 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
59927 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
59928 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
59929
59930 int register_module_notifier(struct notifier_block * nb)
59931 {
59932 @@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
59933 return true;
59934
59935 list_for_each_entry_rcu(mod, &modules, list) {
59936 - struct symsearch arr[] = {
59937 + struct symsearch modarr[] = {
59938 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
59939 NOT_GPL_ONLY, false },
59940 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
59941 @@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
59942 #endif
59943 };
59944
59945 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
59946 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
59947 return true;
59948 }
59949 return false;
59950 @@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
59951 void *ptr;
59952 int cpu;
59953
59954 - if (align > PAGE_SIZE) {
59955 + if (align-1 >= PAGE_SIZE) {
59956 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
59957 name, align, PAGE_SIZE);
59958 align = PAGE_SIZE;
59959 @@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
59960 * /sys/module/foo/sections stuff
59961 * J. Corbet <corbet@lwn.net>
59962 */
59963 -#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
59964 +#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59965
59966 static inline bool sect_empty(const Elf_Shdr *sect)
59967 {
59968 @@ -1545,7 +1547,8 @@ static void free_module(struct module *m
59969 destroy_params(mod->kp, mod->num_kp);
59970
59971 /* This may be NULL, but that's OK */
59972 - module_free(mod, mod->module_init);
59973 + module_free(mod, mod->module_init_rw);
59974 + module_free_exec(mod, mod->module_init_rx);
59975 kfree(mod->args);
59976 if (mod->percpu)
59977 percpu_modfree(mod->percpu);
59978 @@ -1554,10 +1557,12 @@ static void free_module(struct module *m
59979 percpu_modfree(mod->refptr);
59980 #endif
59981 /* Free lock-classes: */
59982 - lockdep_free_key_range(mod->module_core, mod->core_size);
59983 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
59984 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
59985
59986 /* Finally, free the core (containing the module structure) */
59987 - module_free(mod, mod->module_core);
59988 + module_free_exec(mod, mod->module_core_rx);
59989 + module_free(mod, mod->module_core_rw);
59990
59991 #ifdef CONFIG_MPU
59992 update_protections(current->mm);
59993 @@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
59994 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
59995 int ret = 0;
59996 const struct kernel_symbol *ksym;
59997 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
59998 + int is_fs_load = 0;
59999 + int register_filesystem_found = 0;
60000 + char *p;
60001 +
60002 + p = strstr(mod->args, "grsec_modharden_fs");
60003 +
60004 + if (p) {
60005 + char *endptr = p + strlen("grsec_modharden_fs");
60006 + /* copy \0 as well */
60007 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
60008 + is_fs_load = 1;
60009 + }
60010 +#endif
60011 +
60012
60013 for (i = 1; i < n; i++) {
60014 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60015 + const char *name = strtab + sym[i].st_name;
60016 +
60017 + /* it's a real shame this will never get ripped and copied
60018 + upstream! ;(
60019 + */
60020 + if (is_fs_load && !strcmp(name, "register_filesystem"))
60021 + register_filesystem_found = 1;
60022 +#endif
60023 switch (sym[i].st_shndx) {
60024 case SHN_COMMON:
60025 /* We compiled with -fno-common. These are not
60026 @@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
60027 strtab + sym[i].st_name, mod);
60028 /* Ok if resolved. */
60029 if (ksym) {
60030 + pax_open_kernel();
60031 sym[i].st_value = ksym->value;
60032 + pax_close_kernel();
60033 break;
60034 }
60035
60036 @@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
60037 secbase = (unsigned long)mod->percpu;
60038 else
60039 secbase = sechdrs[sym[i].st_shndx].sh_addr;
60040 + pax_open_kernel();
60041 sym[i].st_value += secbase;
60042 + pax_close_kernel();
60043 break;
60044 }
60045 }
60046
60047 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60048 + if (is_fs_load && !register_filesystem_found) {
60049 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
60050 + ret = -EPERM;
60051 + }
60052 +#endif
60053 +
60054 return ret;
60055 }
60056
60057 @@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
60058 || s->sh_entsize != ~0UL
60059 || strstarts(secstrings + s->sh_name, ".init"))
60060 continue;
60061 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
60062 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60063 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
60064 + else
60065 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
60066 DEBUGP("\t%s\n", secstrings + s->sh_name);
60067 }
60068 - if (m == 0)
60069 - mod->core_text_size = mod->core_size;
60070 }
60071
60072 DEBUGP("Init section allocation order:\n");
60073 @@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
60074 || s->sh_entsize != ~0UL
60075 || !strstarts(secstrings + s->sh_name, ".init"))
60076 continue;
60077 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
60078 - | INIT_OFFSET_MASK);
60079 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60080 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
60081 + else
60082 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
60083 + s->sh_entsize |= INIT_OFFSET_MASK;
60084 DEBUGP("\t%s\n", secstrings + s->sh_name);
60085 }
60086 - if (m == 0)
60087 - mod->init_text_size = mod->init_size;
60088 }
60089 }
60090
60091 @@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
60092
60093 /* As per nm */
60094 static char elf_type(const Elf_Sym *sym,
60095 - Elf_Shdr *sechdrs,
60096 - const char *secstrings,
60097 - struct module *mod)
60098 + const Elf_Shdr *sechdrs,
60099 + const char *secstrings)
60100 {
60101 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
60102 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
60103 @@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
60104
60105 /* Put symbol section at end of init part of module. */
60106 symsect->sh_flags |= SHF_ALLOC;
60107 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
60108 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
60109 symindex) | INIT_OFFSET_MASK;
60110 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
60111
60112 @@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
60113 }
60114
60115 /* Append room for core symbols at end of core part. */
60116 - symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
60117 - mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
60118 + symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
60119 + mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
60120
60121 /* Put string table section at end of init part of module. */
60122 strsect->sh_flags |= SHF_ALLOC;
60123 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
60124 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
60125 strindex) | INIT_OFFSET_MASK;
60126 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
60127
60128 /* Append room for core symbols' strings at end of core part. */
60129 - *pstroffs = mod->core_size;
60130 + *pstroffs = mod->core_size_rx;
60131 __set_bit(0, strmap);
60132 - mod->core_size += bitmap_weight(strmap, strsect->sh_size);
60133 + mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
60134
60135 return symoffs;
60136 }
60137 @@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
60138 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
60139 mod->strtab = (void *)sechdrs[strindex].sh_addr;
60140
60141 + pax_open_kernel();
60142 +
60143 /* Set types up while we still have access to sections. */
60144 for (i = 0; i < mod->num_symtab; i++)
60145 mod->symtab[i].st_info
60146 - = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
60147 + = elf_type(&mod->symtab[i], sechdrs, secstrings);
60148
60149 - mod->core_symtab = dst = mod->module_core + symoffs;
60150 + mod->core_symtab = dst = mod->module_core_rx + symoffs;
60151 src = mod->symtab;
60152 *dst = *src;
60153 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
60154 @@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
60155 }
60156 mod->core_num_syms = ndst;
60157
60158 - mod->core_strtab = s = mod->module_core + stroffs;
60159 + mod->core_strtab = s = mod->module_core_rx + stroffs;
60160 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
60161 if (test_bit(i, strmap))
60162 *++s = mod->strtab[i];
60163 +
60164 + pax_close_kernel();
60165 }
60166 #else
60167 static inline unsigned long layout_symtab(struct module *mod,
60168 @@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
60169 #endif
60170 }
60171
60172 -static void *module_alloc_update_bounds(unsigned long size)
60173 +static void *module_alloc_update_bounds_rw(unsigned long size)
60174 {
60175 void *ret = module_alloc(size);
60176
60177 if (ret) {
60178 /* Update module bounds. */
60179 - if ((unsigned long)ret < module_addr_min)
60180 - module_addr_min = (unsigned long)ret;
60181 - if ((unsigned long)ret + size > module_addr_max)
60182 - module_addr_max = (unsigned long)ret + size;
60183 + if ((unsigned long)ret < module_addr_min_rw)
60184 + module_addr_min_rw = (unsigned long)ret;
60185 + if ((unsigned long)ret + size > module_addr_max_rw)
60186 + module_addr_max_rw = (unsigned long)ret + size;
60187 + }
60188 + return ret;
60189 +}
60190 +
60191 +static void *module_alloc_update_bounds_rx(unsigned long size)
60192 +{
60193 + void *ret = module_alloc_exec(size);
60194 +
60195 + if (ret) {
60196 + /* Update module bounds. */
60197 + if ((unsigned long)ret < module_addr_min_rx)
60198 + module_addr_min_rx = (unsigned long)ret;
60199 + if ((unsigned long)ret + size > module_addr_max_rx)
60200 + module_addr_max_rx = (unsigned long)ret + size;
60201 }
60202 return ret;
60203 }
60204 @@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
60205 unsigned int i;
60206
60207 /* only scan the sections containing data */
60208 - kmemleak_scan_area(mod->module_core, (unsigned long)mod -
60209 - (unsigned long)mod->module_core,
60210 + kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
60211 + (unsigned long)mod->module_core_rw,
60212 sizeof(struct module), GFP_KERNEL);
60213
60214 for (i = 1; i < hdr->e_shnum; i++) {
60215 @@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
60216 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
60217 continue;
60218
60219 - kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
60220 - (unsigned long)mod->module_core,
60221 + kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
60222 + (unsigned long)mod->module_core_rw,
60223 sechdrs[i].sh_size, GFP_KERNEL);
60224 }
60225 }
60226 @@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
60227 secstrings, &stroffs, strmap);
60228
60229 /* Do the allocs. */
60230 - ptr = module_alloc_update_bounds(mod->core_size);
60231 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
60232 /*
60233 * The pointer to this block is stored in the module structure
60234 * which is inside the block. Just mark it as not being a
60235 @@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
60236 err = -ENOMEM;
60237 goto free_percpu;
60238 }
60239 - memset(ptr, 0, mod->core_size);
60240 - mod->module_core = ptr;
60241 + memset(ptr, 0, mod->core_size_rw);
60242 + mod->module_core_rw = ptr;
60243
60244 - ptr = module_alloc_update_bounds(mod->init_size);
60245 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
60246 /*
60247 * The pointer to this block is stored in the module structure
60248 * which is inside the block. This block doesn't need to be
60249 * scanned as it contains data and code that will be freed
60250 * after the module is initialized.
60251 */
60252 - kmemleak_ignore(ptr);
60253 - if (!ptr && mod->init_size) {
60254 + kmemleak_not_leak(ptr);
60255 + if (!ptr && mod->init_size_rw) {
60256 + err = -ENOMEM;
60257 + goto free_core_rw;
60258 + }
60259 + memset(ptr, 0, mod->init_size_rw);
60260 + mod->module_init_rw = ptr;
60261 +
60262 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
60263 + kmemleak_not_leak(ptr);
60264 + if (!ptr) {
60265 err = -ENOMEM;
60266 - goto free_core;
60267 + goto free_init_rw;
60268 }
60269 - memset(ptr, 0, mod->init_size);
60270 - mod->module_init = ptr;
60271 +
60272 + pax_open_kernel();
60273 + memset(ptr, 0, mod->core_size_rx);
60274 + pax_close_kernel();
60275 + mod->module_core_rx = ptr;
60276 +
60277 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
60278 + kmemleak_not_leak(ptr);
60279 + if (!ptr && mod->init_size_rx) {
60280 + err = -ENOMEM;
60281 + goto free_core_rx;
60282 + }
60283 +
60284 + pax_open_kernel();
60285 + memset(ptr, 0, mod->init_size_rx);
60286 + pax_close_kernel();
60287 + mod->module_init_rx = ptr;
60288
60289 /* Transfer each section which specifies SHF_ALLOC */
60290 DEBUGP("final section addresses:\n");
60291 @@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
60292 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
60293 continue;
60294
60295 - if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
60296 - dest = mod->module_init
60297 - + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60298 - else
60299 - dest = mod->module_core + sechdrs[i].sh_entsize;
60300 + if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
60301 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
60302 + dest = mod->module_init_rw
60303 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60304 + else
60305 + dest = mod->module_init_rx
60306 + + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
60307 + } else {
60308 + if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
60309 + dest = mod->module_core_rw + sechdrs[i].sh_entsize;
60310 + else
60311 + dest = mod->module_core_rx + sechdrs[i].sh_entsize;
60312 + }
60313 +
60314 + if (sechdrs[i].sh_type != SHT_NOBITS) {
60315
60316 - if (sechdrs[i].sh_type != SHT_NOBITS)
60317 - memcpy(dest, (void *)sechdrs[i].sh_addr,
60318 - sechdrs[i].sh_size);
60319 +#ifdef CONFIG_PAX_KERNEXEC
60320 +#ifdef CONFIG_X86_64
60321 + if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
60322 + set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
60323 +#endif
60324 + if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
60325 + pax_open_kernel();
60326 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
60327 + pax_close_kernel();
60328 + } else
60329 +#endif
60330 +
60331 + memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
60332 + }
60333 /* Update sh_addr to point to copy in image. */
60334 - sechdrs[i].sh_addr = (unsigned long)dest;
60335 +
60336 +#ifdef CONFIG_PAX_KERNEXEC
60337 + if (sechdrs[i].sh_flags & SHF_EXECINSTR)
60338 + sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
60339 + else
60340 +#endif
60341 +
60342 + sechdrs[i].sh_addr = (unsigned long)dest;
60343 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
60344 }
60345 /* Module has been moved. */
60346 @@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
60347 mod->name);
60348 if (!mod->refptr) {
60349 err = -ENOMEM;
60350 - goto free_init;
60351 + goto free_init_rx;
60352 }
60353 #endif
60354 /* Now we've moved module, initialize linked lists, etc. */
60355 @@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
60356 /* Set up MODINFO_ATTR fields */
60357 setup_modinfo(mod, sechdrs, infoindex);
60358
60359 + mod->args = args;
60360 +
60361 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
60362 + {
60363 + char *p, *p2;
60364 +
60365 + if (strstr(mod->args, "grsec_modharden_netdev")) {
60366 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
60367 + err = -EPERM;
60368 + goto cleanup;
60369 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
60370 + p += strlen("grsec_modharden_normal");
60371 + p2 = strstr(p, "_");
60372 + if (p2) {
60373 + *p2 = '\0';
60374 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
60375 + *p2 = '_';
60376 + }
60377 + err = -EPERM;
60378 + goto cleanup;
60379 + }
60380 + }
60381 +#endif
60382 +
60383 +
60384 /* Fix up syms, so that st_value is a pointer to location. */
60385 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
60386 mod);
60387 @@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
60388
60389 /* Now do relocations. */
60390 for (i = 1; i < hdr->e_shnum; i++) {
60391 - const char *strtab = (char *)sechdrs[strindex].sh_addr;
60392 unsigned int info = sechdrs[i].sh_info;
60393 + strtab = (char *)sechdrs[strindex].sh_addr;
60394
60395 /* Not a valid relocation section? */
60396 if (info >= hdr->e_shnum)
60397 @@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
60398 * Do it before processing of module parameters, so the module
60399 * can provide parameter accessor functions of its own.
60400 */
60401 - if (mod->module_init)
60402 - flush_icache_range((unsigned long)mod->module_init,
60403 - (unsigned long)mod->module_init
60404 - + mod->init_size);
60405 - flush_icache_range((unsigned long)mod->module_core,
60406 - (unsigned long)mod->module_core + mod->core_size);
60407 + if (mod->module_init_rx)
60408 + flush_icache_range((unsigned long)mod->module_init_rx,
60409 + (unsigned long)mod->module_init_rx
60410 + + mod->init_size_rx);
60411 + flush_icache_range((unsigned long)mod->module_core_rx,
60412 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
60413
60414 set_fs(old_fs);
60415
60416 - mod->args = args;
60417 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
60418 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
60419 mod->name);
60420 @@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
60421 free_unload:
60422 module_unload_free(mod);
60423 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
60424 + free_init_rx:
60425 percpu_modfree(mod->refptr);
60426 - free_init:
60427 #endif
60428 - module_free(mod, mod->module_init);
60429 - free_core:
60430 - module_free(mod, mod->module_core);
60431 + module_free_exec(mod, mod->module_init_rx);
60432 + free_core_rx:
60433 + module_free_exec(mod, mod->module_core_rx);
60434 + free_init_rw:
60435 + module_free(mod, mod->module_init_rw);
60436 + free_core_rw:
60437 + module_free(mod, mod->module_core_rw);
60438 /* mod will be freed with core. Don't access it beyond this line! */
60439 free_percpu:
60440 if (percpu)
60441 @@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
60442 mod->symtab = mod->core_symtab;
60443 mod->strtab = mod->core_strtab;
60444 #endif
60445 - module_free(mod, mod->module_init);
60446 - mod->module_init = NULL;
60447 - mod->init_size = 0;
60448 - mod->init_text_size = 0;
60449 + module_free(mod, mod->module_init_rw);
60450 + module_free_exec(mod, mod->module_init_rx);
60451 + mod->module_init_rw = NULL;
60452 + mod->module_init_rx = NULL;
60453 + mod->init_size_rw = 0;
60454 + mod->init_size_rx = 0;
60455 mutex_unlock(&module_mutex);
60456
60457 return 0;
60458 @@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
60459 unsigned long nextval;
60460
60461 	/* At worst, next value is at end of module */
60462 - if (within_module_init(addr, mod))
60463 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
60464 + if (within_module_init_rx(addr, mod))
60465 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
60466 + else if (within_module_init_rw(addr, mod))
60467 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
60468 + else if (within_module_core_rx(addr, mod))
60469 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
60470 + else if (within_module_core_rw(addr, mod))
60471 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
60472 else
60473 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
60474 + return NULL;
60475
60476 	/* Scan for closest preceding symbol, and next symbol. (ELF
60477 starts real symbols at 1). */
60478 @@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
60479 char buf[8];
60480
60481 seq_printf(m, "%s %u",
60482 - mod->name, mod->init_size + mod->core_size);
60483 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
60484 print_unload_info(m, mod);
60485
60486 /* Informative for users. */
60487 @@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
60488 mod->state == MODULE_STATE_COMING ? "Loading":
60489 "Live");
60490 /* Used by oprofile and other similar tools. */
60491 - seq_printf(m, " 0x%p", mod->module_core);
60492 + seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
60493
60494 /* Taints info */
60495 if (mod->taints)
60496 @@ -2981,7 +3128,17 @@ static const struct file_operations proc
60497
60498 static int __init proc_modules_init(void)
60499 {
60500 +#ifndef CONFIG_GRKERNSEC_HIDESYM
60501 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60502 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60503 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60504 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
60505 +#else
60506 proc_create("modules", 0, NULL, &proc_modules_operations);
60507 +#endif
60508 +#else
60509 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
60510 +#endif
60511 return 0;
60512 }
60513 module_init(proc_modules_init);
60514 @@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
60515 {
60516 struct module *mod;
60517
60518 - if (addr < module_addr_min || addr > module_addr_max)
60519 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
60520 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
60521 return NULL;
60522
60523 list_for_each_entry_rcu(mod, &modules, list)
60524 - if (within_module_core(addr, mod)
60525 - || within_module_init(addr, mod))
60526 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
60527 return mod;
60528 return NULL;
60529 }
60530 @@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
60531 */
60532 struct module *__module_text_address(unsigned long addr)
60533 {
60534 - struct module *mod = __module_address(addr);
60535 + struct module *mod;
60536 +
60537 +#ifdef CONFIG_X86_32
60538 + addr = ktla_ktva(addr);
60539 +#endif
60540 +
60541 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
60542 + return NULL;
60543 +
60544 + mod = __module_address(addr);
60545 +
60546 if (mod) {
60547 /* Make sure it's within the text section. */
60548 - if (!within(addr, mod->module_init, mod->init_text_size)
60549 - && !within(addr, mod->module_core, mod->core_text_size))
60550 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
60551 mod = NULL;
60552 }
60553 return mod;
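
The kernel/module.c changes above split module memory into separate read-write and read-execute regions (module_core_rw/module_core_rx and the matching init fields), route each ELF section by its flags, track allocation bounds per region, and free the two regions with module_free() and module_free_exec() respectively. The routing rule is the heart of it: writable or non-allocated sections land in the RW region, everything else in the RX region. A self-contained sketch of just that rule, with invented section descriptors standing in for the real Elf_Shdr handling:

#include <stdio.h>

/* Illustrative stand-ins for the relevant ELF section flags. */
#define SHF_WRITE     0x1
#define SHF_ALLOC     0x2
#define SHF_EXECINSTR 0x4

struct section {
	const char   *name;
	unsigned long flags;
};

/* Mirror of the patch's routing rule: RW if writable or not allocated,
 * otherwise RX. */
static const char *region_for(const struct section *s)
{
	if ((s->flags & SHF_WRITE) || !(s->flags & SHF_ALLOC))
		return "core_rw";
	return "core_rx";
}

int main(void)
{
	const struct section sections[] = {
		{ ".text",    SHF_ALLOC | SHF_EXECINSTR },
		{ ".rodata",  SHF_ALLOC },
		{ ".data",    SHF_ALLOC | SHF_WRITE },
		{ ".comment", 0 },
	};

	for (unsigned i = 0; i < sizeof(sections) / sizeof(sections[0]); i++)
		printf("%-9s -> %s\n", sections[i].name, region_for(&sections[i]));
	return 0;
}

Under this split, .text and .rodata end up in the region that is never left writable after load, while .data stays writable but non-executable, which is the property KERNEXEC builds on.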
60554 diff -urNp linux-2.6.32.42/kernel/mutex.c linux-2.6.32.42/kernel/mutex.c
60555 --- linux-2.6.32.42/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
60556 +++ linux-2.6.32.42/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
60557 @@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
60558 */
60559
60560 for (;;) {
60561 - struct thread_info *owner;
60562 + struct task_struct *owner;
60563
60564 /*
60565 * If we own the BKL, then don't spin. The owner of
60566 @@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
60567 spin_lock_mutex(&lock->wait_lock, flags);
60568
60569 debug_mutex_lock_common(lock, &waiter);
60570 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
60571 + debug_mutex_add_waiter(lock, &waiter, task);
60572
60573 /* add waiting tasks to the end of the waitqueue (FIFO): */
60574 list_add_tail(&waiter.list, &lock->wait_list);
60575 @@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
60576 * TASK_UNINTERRUPTIBLE case.)
60577 */
60578 if (unlikely(signal_pending_state(state, task))) {
60579 - mutex_remove_waiter(lock, &waiter,
60580 - task_thread_info(task));
60581 + mutex_remove_waiter(lock, &waiter, task);
60582 mutex_release(&lock->dep_map, 1, ip);
60583 spin_unlock_mutex(&lock->wait_lock, flags);
60584
60585 @@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
60586 done:
60587 lock_acquired(&lock->dep_map, ip);
60588 /* got the lock - rejoice! */
60589 - mutex_remove_waiter(lock, &waiter, current_thread_info());
60590 + mutex_remove_waiter(lock, &waiter, task);
60591 mutex_set_owner(lock);
60592
60593 /* set it to 0 if there are no waiters left: */
60594 diff -urNp linux-2.6.32.42/kernel/mutex-debug.c linux-2.6.32.42/kernel/mutex-debug.c
60595 --- linux-2.6.32.42/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
60596 +++ linux-2.6.32.42/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
60597 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
60598 }
60599
60600 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60601 - struct thread_info *ti)
60602 + struct task_struct *task)
60603 {
60604 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
60605
60606 /* Mark the current thread as blocked on the lock: */
60607 - ti->task->blocked_on = waiter;
60608 + task->blocked_on = waiter;
60609 }
60610
60611 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60612 - struct thread_info *ti)
60613 + struct task_struct *task)
60614 {
60615 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
60616 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
60617 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
60618 - ti->task->blocked_on = NULL;
60619 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
60620 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
60621 + task->blocked_on = NULL;
60622
60623 list_del_init(&waiter->list);
60624 waiter->task = NULL;
60625 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
60626 return;
60627
60628 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
60629 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
60630 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
60631 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
60632 mutex_clear_owner(lock);
60633 }
60634 diff -urNp linux-2.6.32.42/kernel/mutex-debug.h linux-2.6.32.42/kernel/mutex-debug.h
60635 --- linux-2.6.32.42/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
60636 +++ linux-2.6.32.42/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
60637 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
60638 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
60639 extern void debug_mutex_add_waiter(struct mutex *lock,
60640 struct mutex_waiter *waiter,
60641 - struct thread_info *ti);
60642 + struct task_struct *task);
60643 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
60644 - struct thread_info *ti);
60645 + struct task_struct *task);
60646 extern void debug_mutex_unlock(struct mutex *lock);
60647 extern void debug_mutex_init(struct mutex *lock, const char *name,
60648 struct lock_class_key *key);
60649
60650 static inline void mutex_set_owner(struct mutex *lock)
60651 {
60652 - lock->owner = current_thread_info();
60653 + lock->owner = current;
60654 }
60655
60656 static inline void mutex_clear_owner(struct mutex *lock)
60657 diff -urNp linux-2.6.32.42/kernel/mutex.h linux-2.6.32.42/kernel/mutex.h
60658 --- linux-2.6.32.42/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
60659 +++ linux-2.6.32.42/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
60660 @@ -19,7 +19,7 @@
60661 #ifdef CONFIG_SMP
60662 static inline void mutex_set_owner(struct mutex *lock)
60663 {
60664 - lock->owner = current_thread_info();
60665 + lock->owner = current;
60666 }
60667
60668 static inline void mutex_clear_owner(struct mutex *lock)
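
The mutex hunks above record the lock owner as a struct task_struct * (current) rather than a struct thread_info *, so the debug checks compare tasks directly and the blocked_on bookkeeping no longer goes through ti->task. A reduced sketch of the ownership bookkeeping under that convention; the structures are trimmed stand-ins.

#include <assert.h>
#include <stddef.h>

struct task_struct {
	int pid;
	void *blocked_on;
};

struct mutex {
	struct task_struct *owner;
};

/* Owner is now the task itself, not its thread_info. */
static void mutex_set_owner(struct mutex *lock, struct task_struct *current_task)
{
	lock->owner = current_task;
}

static void mutex_clear_owner(struct mutex *lock)
{
	lock->owner = NULL;
}

int main(void)
{
	struct task_struct task = { .pid = 42, .blocked_on = NULL };
	struct mutex lock = { .owner = NULL };

	mutex_set_owner(&lock, &task);
	assert(lock.owner == &task);   /* debug check compares tasks directly */
	mutex_clear_owner(&lock);
	assert(lock.owner == NULL);
	return 0;
}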
60669 diff -urNp linux-2.6.32.42/kernel/panic.c linux-2.6.32.42/kernel/panic.c
60670 --- linux-2.6.32.42/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
60671 +++ linux-2.6.32.42/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
60672 @@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
60673 const char *board;
60674
60675 printk(KERN_WARNING "------------[ cut here ]------------\n");
60676 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
60677 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
60678 board = dmi_get_system_info(DMI_PRODUCT_NAME);
60679 if (board)
60680 printk(KERN_WARNING "Hardware name: %s\n", board);
60681 @@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
60682 */
60683 void __stack_chk_fail(void)
60684 {
60685 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
60686 + dump_stack();
60687 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
60688 __builtin_return_address(0));
60689 }
60690 EXPORT_SYMBOL(__stack_chk_fail);
60691 diff -urNp linux-2.6.32.42/kernel/params.c linux-2.6.32.42/kernel/params.c
60692 --- linux-2.6.32.42/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
60693 +++ linux-2.6.32.42/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
60694 @@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
60695 return ret;
60696 }
60697
60698 -static struct sysfs_ops module_sysfs_ops = {
60699 +static const struct sysfs_ops module_sysfs_ops = {
60700 .show = module_attr_show,
60701 .store = module_attr_store,
60702 };
60703 @@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
60704 return 0;
60705 }
60706
60707 -static struct kset_uevent_ops module_uevent_ops = {
60708 +static const struct kset_uevent_ops module_uevent_ops = {
60709 .filter = uevent_filter,
60710 };
60711
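
kernel/params.c marks the module_sysfs_ops and module_uevent_ops tables const, so the function-pointer tables are emitted into read-only data and cannot be overwritten at run time. The same pattern in a self-contained form, with a trimmed stand-in for struct sysfs_ops:

#include <stdio.h>

struct sysfs_ops {
	int (*show)(void);
	int (*store)(void);
};

static int module_attr_show(void)  { return 0; }
static int module_attr_store(void) { return 0; }

/* const: the ops table goes into .rodata and cannot be rewritten at
 * run time, which is the point of the hardening change. */
static const struct sysfs_ops module_sysfs_ops = {
	.show  = module_attr_show,
	.store = module_attr_store,
};

int main(void)
{
	printf("show() -> %d\n", module_sysfs_ops.show());
	return 0;
}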
60712 diff -urNp linux-2.6.32.42/kernel/perf_event.c linux-2.6.32.42/kernel/perf_event.c
60713 --- linux-2.6.32.42/kernel/perf_event.c 2011-04-17 17:00:52.000000000 -0400
60714 +++ linux-2.6.32.42/kernel/perf_event.c 2011-05-04 17:56:28.000000000 -0400
60715 @@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
60716 */
60717 int sysctl_perf_event_sample_rate __read_mostly = 100000;
60718
60719 -static atomic64_t perf_event_id;
60720 +static atomic64_unchecked_t perf_event_id;
60721
60722 /*
60723 * Lock for (sysadmin-configurable) event reservations:
60724 @@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
60725 * In order to keep per-task stats reliable we need to flip the event
60726 * values when we flip the contexts.
60727 */
60728 - value = atomic64_read(&next_event->count);
60729 - value = atomic64_xchg(&event->count, value);
60730 - atomic64_set(&next_event->count, value);
60731 + value = atomic64_read_unchecked(&next_event->count);
60732 + value = atomic64_xchg_unchecked(&event->count, value);
60733 + atomic64_set_unchecked(&next_event->count, value);
60734
60735 swap(event->total_time_enabled, next_event->total_time_enabled);
60736 swap(event->total_time_running, next_event->total_time_running);
60737 @@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
60738 update_event_times(event);
60739 }
60740
60741 - return atomic64_read(&event->count);
60742 + return atomic64_read_unchecked(&event->count);
60743 }
60744
60745 /*
60746 @@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
60747 values[n++] = 1 + leader->nr_siblings;
60748 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60749 values[n++] = leader->total_time_enabled +
60750 - atomic64_read(&leader->child_total_time_enabled);
60751 + atomic64_read_unchecked(&leader->child_total_time_enabled);
60752 }
60753 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60754 values[n++] = leader->total_time_running +
60755 - atomic64_read(&leader->child_total_time_running);
60756 + atomic64_read_unchecked(&leader->child_total_time_running);
60757 }
60758
60759 size = n * sizeof(u64);
60760 @@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
60761 values[n++] = perf_event_read_value(event);
60762 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60763 values[n++] = event->total_time_enabled +
60764 - atomic64_read(&event->child_total_time_enabled);
60765 + atomic64_read_unchecked(&event->child_total_time_enabled);
60766 }
60767 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60768 values[n++] = event->total_time_running +
60769 - atomic64_read(&event->child_total_time_running);
60770 + atomic64_read_unchecked(&event->child_total_time_running);
60771 }
60772 if (read_format & PERF_FORMAT_ID)
60773 values[n++] = primary_event_id(event);
60774 @@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
60775 static void perf_event_reset(struct perf_event *event)
60776 {
60777 (void)perf_event_read(event);
60778 - atomic64_set(&event->count, 0);
60779 + atomic64_set_unchecked(&event->count, 0);
60780 perf_event_update_userpage(event);
60781 }
60782
60783 @@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
60784 ++userpg->lock;
60785 barrier();
60786 userpg->index = perf_event_index(event);
60787 - userpg->offset = atomic64_read(&event->count);
60788 + userpg->offset = atomic64_read_unchecked(&event->count);
60789 if (event->state == PERF_EVENT_STATE_ACTIVE)
60790 - userpg->offset -= atomic64_read(&event->hw.prev_count);
60791 + userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
60792
60793 userpg->time_enabled = event->total_time_enabled +
60794 - atomic64_read(&event->child_total_time_enabled);
60795 + atomic64_read_unchecked(&event->child_total_time_enabled);
60796
60797 userpg->time_running = event->total_time_running +
60798 - atomic64_read(&event->child_total_time_running);
60799 + atomic64_read_unchecked(&event->child_total_time_running);
60800
60801 barrier();
60802 ++userpg->lock;
60803 @@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
60804 u64 values[4];
60805 int n = 0;
60806
60807 - values[n++] = atomic64_read(&event->count);
60808 + values[n++] = atomic64_read_unchecked(&event->count);
60809 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
60810 values[n++] = event->total_time_enabled +
60811 - atomic64_read(&event->child_total_time_enabled);
60812 + atomic64_read_unchecked(&event->child_total_time_enabled);
60813 }
60814 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
60815 values[n++] = event->total_time_running +
60816 - atomic64_read(&event->child_total_time_running);
60817 + atomic64_read_unchecked(&event->child_total_time_running);
60818 }
60819 if (read_format & PERF_FORMAT_ID)
60820 values[n++] = primary_event_id(event);
60821 @@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
60822 if (leader != event)
60823 leader->pmu->read(leader);
60824
60825 - values[n++] = atomic64_read(&leader->count);
60826 + values[n++] = atomic64_read_unchecked(&leader->count);
60827 if (read_format & PERF_FORMAT_ID)
60828 values[n++] = primary_event_id(leader);
60829
60830 @@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
60831 if (sub != event)
60832 sub->pmu->read(sub);
60833
60834 - values[n++] = atomic64_read(&sub->count);
60835 + values[n++] = atomic64_read_unchecked(&sub->count);
60836 if (read_format & PERF_FORMAT_ID)
60837 values[n++] = primary_event_id(sub);
60838
60839 @@ -3787,7 +3787,7 @@ static void perf_swevent_add(struct perf
60840 {
60841 struct hw_perf_event *hwc = &event->hw;
60842
60843 - atomic64_add(nr, &event->count);
60844 + atomic64_add_unchecked(nr, &event->count);
60845
60846 if (!hwc->sample_period)
60847 return;
60848 @@ -4044,9 +4044,9 @@ static void cpu_clock_perf_event_update(
60849 u64 now;
60850
60851 now = cpu_clock(cpu);
60852 - prev = atomic64_read(&event->hw.prev_count);
60853 - atomic64_set(&event->hw.prev_count, now);
60854 - atomic64_add(now - prev, &event->count);
60855 + prev = atomic64_read_unchecked(&event->hw.prev_count);
60856 + atomic64_set_unchecked(&event->hw.prev_count, now);
60857 + atomic64_add_unchecked(now - prev, &event->count);
60858 }
60859
60860 static int cpu_clock_perf_event_enable(struct perf_event *event)
60861 @@ -4054,7 +4054,7 @@ static int cpu_clock_perf_event_enable(s
60862 struct hw_perf_event *hwc = &event->hw;
60863 int cpu = raw_smp_processor_id();
60864
60865 - atomic64_set(&hwc->prev_count, cpu_clock(cpu));
60866 + atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
60867 perf_swevent_start_hrtimer(event);
60868
60869 return 0;
60870 @@ -4086,9 +4086,9 @@ static void task_clock_perf_event_update
60871 u64 prev;
60872 s64 delta;
60873
60874 - prev = atomic64_xchg(&event->hw.prev_count, now);
60875 + prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
60876 delta = now - prev;
60877 - atomic64_add(delta, &event->count);
60878 + atomic64_add_unchecked(delta, &event->count);
60879 }
60880
60881 static int task_clock_perf_event_enable(struct perf_event *event)
60882 @@ -4098,7 +4098,7 @@ static int task_clock_perf_event_enable(
60883
60884 now = event->ctx->time;
60885
60886 - atomic64_set(&hwc->prev_count, now);
60887 + atomic64_set_unchecked(&hwc->prev_count, now);
60888
60889 perf_swevent_start_hrtimer(event);
60890
60891 @@ -4293,7 +4293,7 @@ perf_event_alloc(struct perf_event_attr
60892 event->parent = parent_event;
60893
60894 event->ns = get_pid_ns(current->nsproxy->pid_ns);
60895 - event->id = atomic64_inc_return(&perf_event_id);
60896 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
60897
60898 event->state = PERF_EVENT_STATE_INACTIVE;
60899
60900 @@ -4724,15 +4724,15 @@ static void sync_child_event(struct perf
60901 if (child_event->attr.inherit_stat)
60902 perf_event_read_event(child_event, child);
60903
60904 - child_val = atomic64_read(&child_event->count);
60905 + child_val = atomic64_read_unchecked(&child_event->count);
60906
60907 /*
60908 * Add back the child's count to the parent's count:
60909 */
60910 - atomic64_add(child_val, &parent_event->count);
60911 - atomic64_add(child_event->total_time_enabled,
60912 + atomic64_add_unchecked(child_val, &parent_event->count);
60913 + atomic64_add_unchecked(child_event->total_time_enabled,
60914 &parent_event->child_total_time_enabled);
60915 - atomic64_add(child_event->total_time_running,
60916 + atomic64_add_unchecked(child_event->total_time_running,
60917 &parent_event->child_total_time_running);
60918
60919 /*
60920 diff -urNp linux-2.6.32.42/kernel/pid.c linux-2.6.32.42/kernel/pid.c
60921 --- linux-2.6.32.42/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
60922 +++ linux-2.6.32.42/kernel/pid.c 2011-04-18 19:22:38.000000000 -0400
60923 @@ -33,6 +33,7 @@
60924 #include <linux/rculist.h>
60925 #include <linux/bootmem.h>
60926 #include <linux/hash.h>
60927 +#include <linux/security.h>
60928 #include <linux/pid_namespace.h>
60929 #include <linux/init_task.h>
60930 #include <linux/syscalls.h>
60931 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
60932
60933 int pid_max = PID_MAX_DEFAULT;
60934
60935 -#define RESERVED_PIDS 300
60936 +#define RESERVED_PIDS 500
60937
60938 int pid_max_min = RESERVED_PIDS + 1;
60939 int pid_max_max = PID_MAX_LIMIT;
60940 @@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
60941 */
60942 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
60943 {
60944 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
60945 + struct task_struct *task;
60946 +
60947 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
60948 +
60949 + if (gr_pid_is_chrooted(task))
60950 + return NULL;
60951 +
60952 + return task;
60953 }
60954
60955 struct task_struct *find_task_by_vpid(pid_t vnr)
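
kernel/pid.c raises RESERVED_PIDS from 300 to 500 and filters find_task_by_pid_ns() through gr_pid_is_chrooted(), so a chrooted caller cannot resolve tasks living outside its chroot. A hedged sketch of that lookup filter; gr_pid_is_chrooted() and the task fields below are dummies standing in for the grsecurity helpers.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task_struct {
	int  pid;
	bool outside_callers_chroot;   /* simplification for the sketch */
};

/* Stand-in for the grsecurity helper: hide tasks outside the chroot. */
static bool gr_pid_is_chrooted(const struct task_struct *task)
{
	return task && task->outside_callers_chroot;
}

static struct task_struct *find_task_filtered(struct task_struct *task)
{
	if (gr_pid_is_chrooted(task))
		return NULL;            /* pretend the task does not exist */
	return task;
}

int main(void)
{
	struct task_struct inside  = { 100, false };
	struct task_struct outside = { 1,   true  };

	printf("inside : %p\n", (void *)find_task_filtered(&inside));
	printf("outside: %p\n", (void *)find_task_filtered(&outside));
	return 0;
}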
60956 diff -urNp linux-2.6.32.42/kernel/posix-cpu-timers.c linux-2.6.32.42/kernel/posix-cpu-timers.c
60957 --- linux-2.6.32.42/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
60958 +++ linux-2.6.32.42/kernel/posix-cpu-timers.c 2011-04-17 15:56:46.000000000 -0400
60959 @@ -6,6 +6,7 @@
60960 #include <linux/posix-timers.h>
60961 #include <linux/errno.h>
60962 #include <linux/math64.h>
60963 +#include <linux/security.h>
60964 #include <asm/uaccess.h>
60965 #include <linux/kernel_stat.h>
60966 #include <trace/events/timer.h>
60967 diff -urNp linux-2.6.32.42/kernel/posix-timers.c linux-2.6.32.42/kernel/posix-timers.c
60968 --- linux-2.6.32.42/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
60969 +++ linux-2.6.32.42/kernel/posix-timers.c 2011-05-16 21:46:57.000000000 -0400
60970 @@ -42,6 +42,7 @@
60971 #include <linux/compiler.h>
60972 #include <linux/idr.h>
60973 #include <linux/posix-timers.h>
60974 +#include <linux/grsecurity.h>
60975 #include <linux/syscalls.h>
60976 #include <linux/wait.h>
60977 #include <linux/workqueue.h>
60978 @@ -296,6 +297,8 @@ static __init int init_posix_timers(void
60979 .nsleep = no_nsleep,
60980 };
60981
60982 + pax_track_stack();
60983 +
60984 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
60985 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
60986 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
60987 @@ -948,6 +951,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
60988 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
60989 return -EFAULT;
60990
60991 +	/* only the CLOCK_REALTIME clock can be set; all other clocks
60992 +	   have their clock_set fptr set to a nosettime dummy function.
60993 +	   CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
60994 +	   call common_clock_set, which calls do_sys_settimeofday, which
60995 +	   we hook.
60996 +	 */
60997 +
60998 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
60999 }
61000
61001 diff -urNp linux-2.6.32.42/kernel/power/hibernate.c linux-2.6.32.42/kernel/power/hibernate.c
61002 --- linux-2.6.32.42/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
61003 +++ linux-2.6.32.42/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
61004 @@ -48,14 +48,14 @@ enum {
61005
61006 static int hibernation_mode = HIBERNATION_SHUTDOWN;
61007
61008 -static struct platform_hibernation_ops *hibernation_ops;
61009 +static const struct platform_hibernation_ops *hibernation_ops;
61010
61011 /**
61012 * hibernation_set_ops - set the global hibernate operations
61013 * @ops: the hibernation operations to use in subsequent hibernation transitions
61014 */
61015
61016 -void hibernation_set_ops(struct platform_hibernation_ops *ops)
61017 +void hibernation_set_ops(const struct platform_hibernation_ops *ops)
61018 {
61019 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
61020 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
61021 diff -urNp linux-2.6.32.42/kernel/power/poweroff.c linux-2.6.32.42/kernel/power/poweroff.c
61022 --- linux-2.6.32.42/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
61023 +++ linux-2.6.32.42/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
61024 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
61025 .enable_mask = SYSRQ_ENABLE_BOOT,
61026 };
61027
61028 -static int pm_sysrq_init(void)
61029 +static int __init pm_sysrq_init(void)
61030 {
61031 register_sysrq_key('o', &sysrq_poweroff_op);
61032 return 0;
61033 diff -urNp linux-2.6.32.42/kernel/power/process.c linux-2.6.32.42/kernel/power/process.c
61034 --- linux-2.6.32.42/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
61035 +++ linux-2.6.32.42/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
61036 @@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
61037 struct timeval start, end;
61038 u64 elapsed_csecs64;
61039 unsigned int elapsed_csecs;
61040 + bool timedout = false;
61041
61042 do_gettimeofday(&start);
61043
61044 end_time = jiffies + TIMEOUT;
61045 do {
61046 todo = 0;
61047 + if (time_after(jiffies, end_time))
61048 + timedout = true;
61049 read_lock(&tasklist_lock);
61050 do_each_thread(g, p) {
61051 if (frozen(p) || !freezeable(p))
61052 @@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
61053 * It is "frozen enough". If the task does wake
61054 * up, it will immediately call try_to_freeze.
61055 */
61056 - if (!task_is_stopped_or_traced(p) &&
61057 - !freezer_should_skip(p))
61058 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
61059 todo++;
61060 + if (timedout) {
61061 + printk(KERN_ERR "Task refusing to freeze:\n");
61062 + sched_show_task(p);
61063 + }
61064 + }
61065 } while_each_thread(g, p);
61066 read_unlock(&tasklist_lock);
61067 yield(); /* Yield is okay here */
61068 - if (time_after(jiffies, end_time))
61069 - break;
61070 - } while (todo);
61071 + } while (todo && !timedout);
61072
61073 do_gettimeofday(&end);
61074 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
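
The kernel/power/process.c hunk replaces the mid-loop break on timeout with a timedout flag, so the loop performs one final pass after the deadline and names every task that is still refusing to freeze before giving up. The control flow, reduced to a standalone loop with an iteration counter standing in for jiffies:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the freezer loop: keep retrying until nothing
 * is left to freeze or the deadline passes, and report the stuck tasks
 * only on the pass that notices the timeout. */
int main(void)
{
	const int deadline = 5;          /* stand-in for jiffies + TIMEOUT */
	bool timedout = false;
	int now = 0, todo;

	do {
		todo = 0;
		if (++now > deadline)
			timedout = true;

		todo++;                  /* pretend one task never freezes */
		if (timedout)
			fprintf(stderr, "Task refusing to freeze\n");
	} while (todo && !timedout);

	printf("stopped after %d passes, timedout=%d\n", now, timedout);
	return 0;
}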
61075 diff -urNp linux-2.6.32.42/kernel/power/suspend.c linux-2.6.32.42/kernel/power/suspend.c
61076 --- linux-2.6.32.42/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
61077 +++ linux-2.6.32.42/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
61078 @@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
61079 [PM_SUSPEND_MEM] = "mem",
61080 };
61081
61082 -static struct platform_suspend_ops *suspend_ops;
61083 +static const struct platform_suspend_ops *suspend_ops;
61084
61085 /**
61086 * suspend_set_ops - Set the global suspend method table.
61087 * @ops: Pointer to ops structure.
61088 */
61089 -void suspend_set_ops(struct platform_suspend_ops *ops)
61090 +void suspend_set_ops(const struct platform_suspend_ops *ops)
61091 {
61092 mutex_lock(&pm_mutex);
61093 suspend_ops = ops;
61094 diff -urNp linux-2.6.32.42/kernel/printk.c linux-2.6.32.42/kernel/printk.c
61095 --- linux-2.6.32.42/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
61096 +++ linux-2.6.32.42/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
61097 @@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
61098 char c;
61099 int error = 0;
61100
61101 +#ifdef CONFIG_GRKERNSEC_DMESG
61102 + if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
61103 + return -EPERM;
61104 +#endif
61105 +
61106 error = security_syslog(type);
61107 if (error)
61108 return error;
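
kernel/printk.c gates do_syslog() on CAP_SYS_ADMIN when GRKERNSEC_DMESG is enabled, before the regular security_syslog() hook even runs. Reduced to its decision; grsec_enable_dmesg and the capability check are stubbed for the sketch.

#include <stdbool.h>
#include <stdio.h>

#define EPERM 1

static bool grsec_enable_dmesg = true;        /* stand-in for the sysctl */

static bool capable_sys_admin(void) { return false; }   /* unprivileged caller */

static int do_syslog(int type)
{
	if (grsec_enable_dmesg && !capable_sys_admin())
		return -EPERM;
	/* ... normal syslog handling would follow ... */
	return type;
}

int main(void)
{
	printf("do_syslog(3) -> %d\n", do_syslog(3));
	return 0;
}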
61109 diff -urNp linux-2.6.32.42/kernel/profile.c linux-2.6.32.42/kernel/profile.c
61110 --- linux-2.6.32.42/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
61111 +++ linux-2.6.32.42/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
61112 @@ -39,7 +39,7 @@ struct profile_hit {
61113 /* Oprofile timer tick hook */
61114 static int (*timer_hook)(struct pt_regs *) __read_mostly;
61115
61116 -static atomic_t *prof_buffer;
61117 +static atomic_unchecked_t *prof_buffer;
61118 static unsigned long prof_len, prof_shift;
61119
61120 int prof_on __read_mostly;
61121 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
61122 hits[i].pc = 0;
61123 continue;
61124 }
61125 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61126 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61127 hits[i].hits = hits[i].pc = 0;
61128 }
61129 }
61130 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
61131 * Add the current hit(s) and flush the write-queue out
61132 * to the global buffer:
61133 */
61134 - atomic_add(nr_hits, &prof_buffer[pc]);
61135 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
61136 for (i = 0; i < NR_PROFILE_HIT; ++i) {
61137 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61138 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61139 hits[i].pc = hits[i].hits = 0;
61140 }
61141 out:
61142 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
61143 if (prof_on != type || !prof_buffer)
61144 return;
61145 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
61146 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61147 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61148 }
61149 #endif /* !CONFIG_SMP */
61150 EXPORT_SYMBOL_GPL(profile_hits);
61151 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
61152 return -EFAULT;
61153 buf++; p++; count--; read++;
61154 }
61155 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
61156 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
61157 if (copy_to_user(buf, (void *)pnt, count))
61158 return -EFAULT;
61159 read += count;
61160 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
61161 }
61162 #endif
61163 profile_discard_flip_buffers();
61164 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
61165 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
61166 return count;
61167 }
61168
61169 diff -urNp linux-2.6.32.42/kernel/ptrace.c linux-2.6.32.42/kernel/ptrace.c
61170 --- linux-2.6.32.42/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
61171 +++ linux-2.6.32.42/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
61172 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
61173 return ret;
61174 }
61175
61176 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
61177 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
61178 + unsigned int log)
61179 {
61180 const struct cred *cred = current_cred(), *tcred;
61181
61182 @@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
61183 cred->gid != tcred->egid ||
61184 cred->gid != tcred->sgid ||
61185 cred->gid != tcred->gid) &&
61186 - !capable(CAP_SYS_PTRACE)) {
61187 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61188 + (log && !capable(CAP_SYS_PTRACE)))
61189 + ) {
61190 rcu_read_unlock();
61191 return -EPERM;
61192 }
61193 @@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
61194 smp_rmb();
61195 if (task->mm)
61196 dumpable = get_dumpable(task->mm);
61197 - if (!dumpable && !capable(CAP_SYS_PTRACE))
61198 + if (!dumpable &&
61199 + ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61200 + (log && !capable(CAP_SYS_PTRACE))))
61201 return -EPERM;
61202
61203 return security_ptrace_access_check(task, mode);
61204 @@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
61205 {
61206 int err;
61207 task_lock(task);
61208 - err = __ptrace_may_access(task, mode);
61209 + err = __ptrace_may_access(task, mode, 0);
61210 + task_unlock(task);
61211 + return !err;
61212 +}
61213 +
61214 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
61215 +{
61216 + int err;
61217 + task_lock(task);
61218 + err = __ptrace_may_access(task, mode, 1);
61219 task_unlock(task);
61220 return !err;
61221 }
61222 @@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
61223 goto out;
61224
61225 task_lock(task);
61226 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
61227 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
61228 task_unlock(task);
61229 if (retval)
61230 goto unlock_creds;
61231 @@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
61232 goto unlock_tasklist;
61233
61234 task->ptrace = PT_PTRACED;
61235 - if (capable(CAP_SYS_PTRACE))
61236 + if (capable_nolog(CAP_SYS_PTRACE))
61237 task->ptrace |= PT_PTRACE_CAP;
61238
61239 __ptrace_link(task, current);
61240 @@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
61241 {
61242 int copied = 0;
61243
61244 + pax_track_stack();
61245 +
61246 while (len > 0) {
61247 char buf[128];
61248 int this_len, retval;
61249 @@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
61250 {
61251 int copied = 0;
61252
61253 + pax_track_stack();
61254 +
61255 while (len > 0) {
61256 char buf[128];
61257 int this_len, retval;
61258 @@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
61259 int ret = -EIO;
61260 siginfo_t siginfo;
61261
61262 + pax_track_stack();
61263 +
61264 switch (request) {
61265 case PTRACE_PEEKTEXT:
61266 case PTRACE_PEEKDATA:
61267 @@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
61268 ret = ptrace_setoptions(child, data);
61269 break;
61270 case PTRACE_GETEVENTMSG:
61271 - ret = put_user(child->ptrace_message, (unsigned long __user *) data);
61272 + ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
61273 break;
61274
61275 case PTRACE_GETSIGINFO:
61276 ret = ptrace_getsiginfo(child, &siginfo);
61277 if (!ret)
61278 - ret = copy_siginfo_to_user((siginfo_t __user *) data,
61279 + ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
61280 &siginfo);
61281 break;
61282
61283 case PTRACE_SETSIGINFO:
61284 - if (copy_from_user(&siginfo, (siginfo_t __user *) data,
61285 + if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
61286 sizeof siginfo))
61287 ret = -EFAULT;
61288 else
61289 @@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
61290 goto out;
61291 }
61292
61293 + if (gr_handle_ptrace(child, request)) {
61294 + ret = -EPERM;
61295 + goto out_put_task_struct;
61296 + }
61297 +
61298 if (request == PTRACE_ATTACH) {
61299 ret = ptrace_attach(child);
61300 /*
61301 * Some architectures need to do book-keeping after
61302 * a ptrace attach.
61303 */
61304 - if (!ret)
61305 + if (!ret) {
61306 arch_ptrace_attach(child);
61307 + gr_audit_ptrace(child);
61308 + }
61309 goto out_put_task_struct;
61310 }
61311
61312 @@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
61313 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
61314 if (copied != sizeof(tmp))
61315 return -EIO;
61316 - return put_user(tmp, (unsigned long __user *)data);
61317 + return put_user(tmp, (__force unsigned long __user *)data);
61318 }
61319
61320 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
61321 @@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
61322 siginfo_t siginfo;
61323 int ret;
61324
61325 + pax_track_stack();
61326 +
61327 switch (request) {
61328 case PTRACE_PEEKTEXT:
61329 case PTRACE_PEEKDATA:
61330 @@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
61331 goto out;
61332 }
61333
61334 + if (gr_handle_ptrace(child, request)) {
61335 + ret = -EPERM;
61336 + goto out_put_task_struct;
61337 + }
61338 +
61339 if (request == PTRACE_ATTACH) {
61340 ret = ptrace_attach(child);
61341 /*
61342 * Some architectures need to do book-keeping after
61343 * a ptrace attach.
61344 */
61345 - if (!ret)
61346 + if (!ret) {
61347 arch_ptrace_attach(child);
61348 + gr_audit_ptrace(child);
61349 + }
61350 goto out_put_task_struct;
61351 }
61352
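
kernel/ptrace.c threads a log parameter through __ptrace_may_access() so callers choose between the auditing capable() check and the silent capable_nolog() variant, and exports ptrace_may_access_log() for the audited path. A compact sketch of the two entry points sharing one check; capable_nolog() is grsecurity-specific and both capability helpers are stubbed here.

#include <stdbool.h>
#include <stdio.h>

#define CAP_SYS_PTRACE 19

/* Stubs standing in for the kernel's capability checks. */
static bool capable(int cap)
{
	printf("audit: capability %d checked\n", cap);
	return true;
}

static bool capable_nolog(int cap)
{
	(void)cap;
	return true;
}

static int __ptrace_may_access(int target_pid, unsigned int mode, unsigned int log)
{
	(void)target_pid; (void)mode;
	/* Same decision either way; only the logging side effect differs. */
	if ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
	    (log && !capable(CAP_SYS_PTRACE)))
		return -1;              /* -EPERM in the kernel */
	return 0;
}

static bool ptrace_may_access(int pid, unsigned int mode)
{
	return __ptrace_may_access(pid, mode, 0) == 0;
}

static bool ptrace_may_access_log(int pid, unsigned int mode)
{
	return __ptrace_may_access(pid, mode, 1) == 0;
}

int main(void)
{
	printf("silent:  %d\n", ptrace_may_access(1234, 1));
	printf("audited: %d\n", ptrace_may_access_log(1234, 1));
	return 0;
}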
61353 diff -urNp linux-2.6.32.42/kernel/rcutorture.c linux-2.6.32.42/kernel/rcutorture.c
61354 --- linux-2.6.32.42/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
61355 +++ linux-2.6.32.42/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
61356 @@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
61357 { 0 };
61358 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
61359 { 0 };
61360 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61361 -static atomic_t n_rcu_torture_alloc;
61362 -static atomic_t n_rcu_torture_alloc_fail;
61363 -static atomic_t n_rcu_torture_free;
61364 -static atomic_t n_rcu_torture_mberror;
61365 -static atomic_t n_rcu_torture_error;
61366 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
61367 +static atomic_unchecked_t n_rcu_torture_alloc;
61368 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
61369 +static atomic_unchecked_t n_rcu_torture_free;
61370 +static atomic_unchecked_t n_rcu_torture_mberror;
61371 +static atomic_unchecked_t n_rcu_torture_error;
61372 static long n_rcu_torture_timers;
61373 static struct list_head rcu_torture_removed;
61374 static cpumask_var_t shuffle_tmp_mask;
61375 @@ -187,11 +187,11 @@ rcu_torture_alloc(void)
61376
61377 spin_lock_bh(&rcu_torture_lock);
61378 if (list_empty(&rcu_torture_freelist)) {
61379 - atomic_inc(&n_rcu_torture_alloc_fail);
61380 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
61381 spin_unlock_bh(&rcu_torture_lock);
61382 return NULL;
61383 }
61384 - atomic_inc(&n_rcu_torture_alloc);
61385 + atomic_inc_unchecked(&n_rcu_torture_alloc);
61386 p = rcu_torture_freelist.next;
61387 list_del_init(p);
61388 spin_unlock_bh(&rcu_torture_lock);
61389 @@ -204,7 +204,7 @@ rcu_torture_alloc(void)
61390 static void
61391 rcu_torture_free(struct rcu_torture *p)
61392 {
61393 - atomic_inc(&n_rcu_torture_free);
61394 + atomic_inc_unchecked(&n_rcu_torture_free);
61395 spin_lock_bh(&rcu_torture_lock);
61396 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
61397 spin_unlock_bh(&rcu_torture_lock);
61398 @@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
61399 i = rp->rtort_pipe_count;
61400 if (i > RCU_TORTURE_PIPE_LEN)
61401 i = RCU_TORTURE_PIPE_LEN;
61402 - atomic_inc(&rcu_torture_wcount[i]);
61403 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61404 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61405 rp->rtort_mbtest = 0;
61406 rcu_torture_free(rp);
61407 @@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
61408 i = rp->rtort_pipe_count;
61409 if (i > RCU_TORTURE_PIPE_LEN)
61410 i = RCU_TORTURE_PIPE_LEN;
61411 - atomic_inc(&rcu_torture_wcount[i]);
61412 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61413 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
61414 rp->rtort_mbtest = 0;
61415 list_del(&rp->rtort_free);
61416 @@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
61417 i = old_rp->rtort_pipe_count;
61418 if (i > RCU_TORTURE_PIPE_LEN)
61419 i = RCU_TORTURE_PIPE_LEN;
61420 - atomic_inc(&rcu_torture_wcount[i]);
61421 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
61422 old_rp->rtort_pipe_count++;
61423 cur_ops->deferred_free(old_rp);
61424 }
61425 @@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
61426 return;
61427 }
61428 if (p->rtort_mbtest == 0)
61429 - atomic_inc(&n_rcu_torture_mberror);
61430 + atomic_inc_unchecked(&n_rcu_torture_mberror);
61431 spin_lock(&rand_lock);
61432 cur_ops->read_delay(&rand);
61433 n_rcu_torture_timers++;
61434 @@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
61435 continue;
61436 }
61437 if (p->rtort_mbtest == 0)
61438 - atomic_inc(&n_rcu_torture_mberror);
61439 + atomic_inc_unchecked(&n_rcu_torture_mberror);
61440 cur_ops->read_delay(&rand);
61441 preempt_disable();
61442 pipe_count = p->rtort_pipe_count;
61443 @@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
61444 rcu_torture_current,
61445 rcu_torture_current_version,
61446 list_empty(&rcu_torture_freelist),
61447 - atomic_read(&n_rcu_torture_alloc),
61448 - atomic_read(&n_rcu_torture_alloc_fail),
61449 - atomic_read(&n_rcu_torture_free),
61450 - atomic_read(&n_rcu_torture_mberror),
61451 + atomic_read_unchecked(&n_rcu_torture_alloc),
61452 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
61453 + atomic_read_unchecked(&n_rcu_torture_free),
61454 + atomic_read_unchecked(&n_rcu_torture_mberror),
61455 n_rcu_torture_timers);
61456 - if (atomic_read(&n_rcu_torture_mberror) != 0)
61457 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
61458 cnt += sprintf(&page[cnt], " !!!");
61459 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
61460 if (i > 1) {
61461 cnt += sprintf(&page[cnt], "!!! ");
61462 - atomic_inc(&n_rcu_torture_error);
61463 + atomic_inc_unchecked(&n_rcu_torture_error);
61464 WARN_ON_ONCE(1);
61465 }
61466 cnt += sprintf(&page[cnt], "Reader Pipe: ");
61467 @@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
61468 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
61469 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61470 cnt += sprintf(&page[cnt], " %d",
61471 - atomic_read(&rcu_torture_wcount[i]));
61472 + atomic_read_unchecked(&rcu_torture_wcount[i]));
61473 }
61474 cnt += sprintf(&page[cnt], "\n");
61475 if (cur_ops->stats)
61476 @@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
61477
61478 if (cur_ops->cleanup)
61479 cur_ops->cleanup();
61480 - if (atomic_read(&n_rcu_torture_error))
61481 + if (atomic_read_unchecked(&n_rcu_torture_error))
61482 rcu_torture_print_module_parms("End of test: FAILURE");
61483 else
61484 rcu_torture_print_module_parms("End of test: SUCCESS");
61485 @@ -1138,13 +1138,13 @@ rcu_torture_init(void)
61486
61487 rcu_torture_current = NULL;
61488 rcu_torture_current_version = 0;
61489 - atomic_set(&n_rcu_torture_alloc, 0);
61490 - atomic_set(&n_rcu_torture_alloc_fail, 0);
61491 - atomic_set(&n_rcu_torture_free, 0);
61492 - atomic_set(&n_rcu_torture_mberror, 0);
61493 - atomic_set(&n_rcu_torture_error, 0);
61494 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
61495 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
61496 + atomic_set_unchecked(&n_rcu_torture_free, 0);
61497 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
61498 + atomic_set_unchecked(&n_rcu_torture_error, 0);
61499 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
61500 - atomic_set(&rcu_torture_wcount[i], 0);
61501 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
61502 for_each_possible_cpu(cpu) {
61503 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
61504 per_cpu(rcu_torture_count, cpu)[i] = 0;
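All of the rcutorture counters converted above are test statistics, so the patch moves them to atomic_unchecked_t, the variant that PaX's REFCOUNT overflow protection leaves alone. A minimal userspace model of the idea; the type and helpers below are illustrative, not the kernel's definitions:

#include <stdio.h>

/* illustrative: under PAX_REFCOUNT the normal atomic_t traps on overflow,
 * the unchecked variant is a plain counter that is allowed to wrap */
typedef struct { volatile int counter; } atomic_unchecked_t;

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
        __sync_fetch_and_add(&v->counter, 1);   /* no overflow check on purpose */
}

static int atomic_read_unchecked(const atomic_unchecked_t *v)
{
        return v->counter;
}

int main(void)
{
        atomic_unchecked_t n_rcu_torture_alloc = { 0 };

        atomic_inc_unchecked(&n_rcu_torture_alloc);
        printf("allocations: %d\n", atomic_read_unchecked(&n_rcu_torture_alloc));
        return 0;
}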
61505 diff -urNp linux-2.6.32.42/kernel/rcutree.c linux-2.6.32.42/kernel/rcutree.c
61506 --- linux-2.6.32.42/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
61507 +++ linux-2.6.32.42/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
61508 @@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
61509 /*
61510 * Do softirq processing for the current CPU.
61511 */
61512 -static void rcu_process_callbacks(struct softirq_action *unused)
61513 +static void rcu_process_callbacks(void)
61514 {
61515 /*
61516 * Memory references from any prior RCU read-side critical sections
61517 diff -urNp linux-2.6.32.42/kernel/rcutree_plugin.h linux-2.6.32.42/kernel/rcutree_plugin.h
61518 --- linux-2.6.32.42/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
61519 +++ linux-2.6.32.42/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
61520 @@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
61521 */
61522 void __rcu_read_lock(void)
61523 {
61524 - ACCESS_ONCE(current->rcu_read_lock_nesting)++;
61525 + ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
61526 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
61527 }
61528 EXPORT_SYMBOL_GPL(__rcu_read_lock);
61529 @@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
61530 struct task_struct *t = current;
61531
61532 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
61533 - if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
61534 + if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
61535 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
61536 rcu_read_unlock_special(t);
61537 }
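ACCESS_ONCE_RW exists because, elsewhere in this patch, ACCESS_ONCE is narrowed to a const-qualified access so that stray writes through it fail to compile; writes have to name the _RW form explicitly. Approximate definitions and a usage sketch (the macro bodies here are my paraphrase, not copied from the patch):

#include <stdio.h>

#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))  /* read-only alias */
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))        /* writable alias  */

int main(void)
{
        int rcu_read_lock_nesting = 0;

        ACCESS_ONCE_RW(rcu_read_lock_nesting)++;          /* as in __rcu_read_lock() */
        /* ACCESS_ONCE(rcu_read_lock_nesting)++;             would be a compile error */
        printf("nesting = %d\n", ACCESS_ONCE(rcu_read_lock_nesting));
        return 0;
}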
61538 diff -urNp linux-2.6.32.42/kernel/relay.c linux-2.6.32.42/kernel/relay.c
61539 --- linux-2.6.32.42/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
61540 +++ linux-2.6.32.42/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
61541 @@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
61542 unsigned int flags,
61543 int *nonpad_ret)
61544 {
61545 - unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
61546 + unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
61547 struct rchan_buf *rbuf = in->private_data;
61548 unsigned int subbuf_size = rbuf->chan->subbuf_size;
61549 uint64_t pos = (uint64_t) *ppos;
61550 @@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
61551 .ops = &relay_pipe_buf_ops,
61552 .spd_release = relay_page_release,
61553 };
61554 + ssize_t ret;
61555 +
61556 + pax_track_stack();
61557
61558 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
61559 return 0;
61560 diff -urNp linux-2.6.32.42/kernel/resource.c linux-2.6.32.42/kernel/resource.c
61561 --- linux-2.6.32.42/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
61562 +++ linux-2.6.32.42/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
61563 @@ -132,8 +132,18 @@ static const struct file_operations proc
61564
61565 static int __init ioresources_init(void)
61566 {
61567 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
61568 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61569 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
61570 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
61571 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61572 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
61573 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
61574 +#endif
61575 +#else
61576 proc_create("ioports", 0, NULL, &proc_ioports_operations);
61577 proc_create("iomem", 0, NULL, &proc_iomem_operations);
61578 +#endif
61579 return 0;
61580 }
61581 __initcall(ioresources_init);
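With GRKERNSEC_PROC_ADD the /proc/ioports and /proc/iomem entries above are created root-only (0400), or root-plus-group (0440) under GRKERNSEC_PROC_USERGROUP, instead of the stock mode 0, which the proc core expands to the world-readable default. The three modes side by side (plain userspace demo, not kernel code):

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        printf("PROC_USER      : %04o\n", S_IRUSR);            /* 0400, root only  */
        printf("PROC_USERGROUP : %04o\n", S_IRUSR | S_IRGRP);  /* 0440, root+group */
        printf("default        : %04o\n", 0444);               /* world readable   */
        return 0;
}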
61582 diff -urNp linux-2.6.32.42/kernel/rtmutex.c linux-2.6.32.42/kernel/rtmutex.c
61583 --- linux-2.6.32.42/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
61584 +++ linux-2.6.32.42/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
61585 @@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
61586 */
61587 spin_lock_irqsave(&pendowner->pi_lock, flags);
61588
61589 - WARN_ON(!pendowner->pi_blocked_on);
61590 + BUG_ON(!pendowner->pi_blocked_on);
61591 WARN_ON(pendowner->pi_blocked_on != waiter);
61592 WARN_ON(pendowner->pi_blocked_on->lock != lock);
61593
61594 diff -urNp linux-2.6.32.42/kernel/rtmutex-tester.c linux-2.6.32.42/kernel/rtmutex-tester.c
61595 --- linux-2.6.32.42/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
61596 +++ linux-2.6.32.42/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
61597 @@ -21,7 +21,7 @@
61598 #define MAX_RT_TEST_MUTEXES 8
61599
61600 static spinlock_t rttest_lock;
61601 -static atomic_t rttest_event;
61602 +static atomic_unchecked_t rttest_event;
61603
61604 struct test_thread_data {
61605 int opcode;
61606 @@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
61607
61608 case RTTEST_LOCKCONT:
61609 td->mutexes[td->opdata] = 1;
61610 - td->event = atomic_add_return(1, &rttest_event);
61611 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61612 return 0;
61613
61614 case RTTEST_RESET:
61615 @@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
61616 return 0;
61617
61618 case RTTEST_RESETEVENT:
61619 - atomic_set(&rttest_event, 0);
61620 + atomic_set_unchecked(&rttest_event, 0);
61621 return 0;
61622
61623 default:
61624 @@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
61625 return ret;
61626
61627 td->mutexes[id] = 1;
61628 - td->event = atomic_add_return(1, &rttest_event);
61629 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61630 rt_mutex_lock(&mutexes[id]);
61631 - td->event = atomic_add_return(1, &rttest_event);
61632 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61633 td->mutexes[id] = 4;
61634 return 0;
61635
61636 @@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
61637 return ret;
61638
61639 td->mutexes[id] = 1;
61640 - td->event = atomic_add_return(1, &rttest_event);
61641 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61642 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
61643 - td->event = atomic_add_return(1, &rttest_event);
61644 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61645 td->mutexes[id] = ret ? 0 : 4;
61646 return ret ? -EINTR : 0;
61647
61648 @@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
61649 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
61650 return ret;
61651
61652 - td->event = atomic_add_return(1, &rttest_event);
61653 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61654 rt_mutex_unlock(&mutexes[id]);
61655 - td->event = atomic_add_return(1, &rttest_event);
61656 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61657 td->mutexes[id] = 0;
61658 return 0;
61659
61660 @@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
61661 break;
61662
61663 td->mutexes[dat] = 2;
61664 - td->event = atomic_add_return(1, &rttest_event);
61665 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61666 break;
61667
61668 case RTTEST_LOCKBKL:
61669 @@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
61670 return;
61671
61672 td->mutexes[dat] = 3;
61673 - td->event = atomic_add_return(1, &rttest_event);
61674 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61675 break;
61676
61677 case RTTEST_LOCKNOWAIT:
61678 @@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
61679 return;
61680
61681 td->mutexes[dat] = 1;
61682 - td->event = atomic_add_return(1, &rttest_event);
61683 + td->event = atomic_add_return_unchecked(1, &rttest_event);
61684 return;
61685
61686 case RTTEST_LOCKBKL:
61687 diff -urNp linux-2.6.32.42/kernel/sched.c linux-2.6.32.42/kernel/sched.c
61688 --- linux-2.6.32.42/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
61689 +++ linux-2.6.32.42/kernel/sched.c 2011-05-22 23:02:06.000000000 -0400
61690 @@ -5043,7 +5043,7 @@ out:
61691 * In CONFIG_NO_HZ case, the idle load balance owner will do the
61692 * rebalancing for all the cpus for whom scheduler ticks are stopped.
61693 */
61694 -static void run_rebalance_domains(struct softirq_action *h)
61695 +static void run_rebalance_domains(void)
61696 {
61697 int this_cpu = smp_processor_id();
61698 struct rq *this_rq = cpu_rq(this_cpu);
61699 @@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
61700 struct rq *rq;
61701 int cpu;
61702
61703 + pax_track_stack();
61704 +
61705 need_resched:
61706 preempt_disable();
61707 cpu = smp_processor_id();
61708 @@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
61709 * Look out! "owner" is an entirely speculative pointer
61710 * access and not reliable.
61711 */
61712 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
61713 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
61714 {
61715 unsigned int cpu;
61716 struct rq *rq;
61717 @@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lo
61718 * DEBUG_PAGEALLOC could have unmapped it if
61719 * the mutex owner just released it and exited.
61720 */
61721 - if (probe_kernel_address(&owner->cpu, cpu))
61722 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
61723 return 0;
61724 #else
61725 - cpu = owner->cpu;
61726 + cpu = task_thread_info(owner)->cpu;
61727 #endif
61728
61729 /*
61730 @@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lo
61731 /*
61732 * Is that owner really running on that cpu?
61733 */
61734 - if (task_thread_info(rq->curr) != owner || need_resched())
61735 + if (rq->curr != owner || need_resched())
61736 return 0;
61737
61738 cpu_relax();
61739 @@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p
61740 /* convert nice value [19,-20] to rlimit style value [1,40] */
61741 int nice_rlim = 20 - nice;
61742
61743 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
61744 +
61745 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
61746 capable(CAP_SYS_NICE));
61747 }
61748 @@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
61749 if (nice > 19)
61750 nice = 19;
61751
61752 - if (increment < 0 && !can_nice(current, nice))
61753 + if (increment < 0 && (!can_nice(current, nice) ||
61754 + gr_handle_chroot_nice()))
61755 return -EPERM;
61756
61757 retval = security_task_setnice(current, nice);
61758 @@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int
61759 long power;
61760 int weight;
61761
61762 - WARN_ON(!sd || !sd->groups);
61763 + BUG_ON(!sd || !sd->groups);
61764
61765 if (cpu != group_first_cpu(sd->groups))
61766 return;
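can_nice() works in RLIMIT_NICE units, where nice 19..-20 maps to 1..40, and the added gr_learn_resource() call feeds that same converted value to grsecurity's resource learning. The conversion itself, for reference (standalone demo):

#include <stdio.h>

/* nice -20 (highest priority) .. 19 (lowest) becomes 40 .. 1 */
static int nice_rlim(int nice) { return 20 - nice; }

int main(void)
{
        printf("nice -20 -> %d\n", nice_rlim(-20));   /* 40 */
        printf("nice   0 -> %d\n", nice_rlim(0));     /* 20 */
        printf("nice  19 -> %d\n", nice_rlim(19));    /*  1 */
        return 0;
}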
61767 diff -urNp linux-2.6.32.42/kernel/signal.c linux-2.6.32.42/kernel/signal.c
61768 --- linux-2.6.32.42/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
61769 +++ linux-2.6.32.42/kernel/signal.c 2011-05-22 23:02:06.000000000 -0400
61770 @@ -41,12 +41,12 @@
61771
61772 static struct kmem_cache *sigqueue_cachep;
61773
61774 -static void __user *sig_handler(struct task_struct *t, int sig)
61775 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
61776 {
61777 return t->sighand->action[sig - 1].sa.sa_handler;
61778 }
61779
61780 -static int sig_handler_ignored(void __user *handler, int sig)
61781 +static int sig_handler_ignored(__sighandler_t handler, int sig)
61782 {
61783 /* Is it explicitly or implicitly ignored? */
61784 return handler == SIG_IGN ||
61785 @@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
61786 static int sig_task_ignored(struct task_struct *t, int sig,
61787 int from_ancestor_ns)
61788 {
61789 - void __user *handler;
61790 + __sighandler_t handler;
61791
61792 handler = sig_handler(t, sig);
61793
61794 @@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
61795 */
61796 user = get_uid(__task_cred(t)->user);
61797 atomic_inc(&user->sigpending);
61798 +
61799 + if (!override_rlimit)
61800 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
61801 if (override_rlimit ||
61802 atomic_read(&user->sigpending) <=
61803 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
61804 @@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
61805
61806 int unhandled_signal(struct task_struct *tsk, int sig)
61807 {
61808 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
61809 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
61810 if (is_global_init(tsk))
61811 return 1;
61812 if (handler != SIG_IGN && handler != SIG_DFL)
61813 @@ -627,6 +630,9 @@ static int check_kill_permission(int sig
61814 }
61815 }
61816
61817 + if (gr_handle_signal(t, sig))
61818 + return -EPERM;
61819 +
61820 return security_task_kill(t, info, sig, 0);
61821 }
61822
61823 @@ -968,7 +974,7 @@ __group_send_sig_info(int sig, struct si
61824 return send_signal(sig, info, p, 1);
61825 }
61826
61827 -static int
61828 +int
61829 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
61830 {
61831 return send_signal(sig, info, t, 0);
61832 @@ -1005,6 +1011,7 @@ force_sig_info(int sig, struct siginfo *
61833 unsigned long int flags;
61834 int ret, blocked, ignored;
61835 struct k_sigaction *action;
61836 + int is_unhandled = 0;
61837
61838 spin_lock_irqsave(&t->sighand->siglock, flags);
61839 action = &t->sighand->action[sig-1];
61840 @@ -1019,9 +1026,18 @@ force_sig_info(int sig, struct siginfo *
61841 }
61842 if (action->sa.sa_handler == SIG_DFL)
61843 t->signal->flags &= ~SIGNAL_UNKILLABLE;
61844 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
61845 + is_unhandled = 1;
61846 ret = specific_send_sig_info(sig, info, t);
61847 spin_unlock_irqrestore(&t->sighand->siglock, flags);
61848
61849 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
61850 + normal operation */
61851 + if (is_unhandled) {
61852 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
61853 + gr_handle_crash(t, sig);
61854 + }
61855 +
61856 return ret;
61857 }
61858
61859 @@ -1081,8 +1097,11 @@ int group_send_sig_info(int sig, struct
61860 {
61861 int ret = check_kill_permission(sig, info, p);
61862
61863 - if (!ret && sig)
61864 + if (!ret && sig) {
61865 ret = do_send_sig_info(sig, info, p, true);
61866 + if (!ret)
61867 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
61868 + }
61869
61870 return ret;
61871 }
61872 @@ -1644,6 +1663,8 @@ void ptrace_notify(int exit_code)
61873 {
61874 siginfo_t info;
61875
61876 + pax_track_stack();
61877 +
61878 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
61879
61880 memset(&info, 0, sizeof info);
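The force_sig_info() hunk records, while the siglock is held, whether the signal has no user handler (SIG_IGN or SIG_DFL) and only in that case calls gr_log_signal()/gr_handle_crash(), so programs that catch SIGSEGV themselves (the comment cites Java) do not trigger the crash handling. The handler test in isolation, as a userspace sketch:

#include <signal.h>
#include <stdio.h>

static void my_segv_handler(int sig) { (void)sig; }

/* mirrors the "is_unhandled" test added to force_sig_info() */
static int signal_is_unhandled(void (*handler)(int))
{
        return handler == SIG_IGN || handler == SIG_DFL;
}

int main(void)
{
        printf("SIG_DFL      -> unhandled: %d\n", signal_is_unhandled(SIG_DFL));
        printf("SIG_IGN      -> unhandled: %d\n", signal_is_unhandled(SIG_IGN));
        printf("user handler -> unhandled: %d\n", signal_is_unhandled(my_segv_handler));
        return 0;
}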
61881 diff -urNp linux-2.6.32.42/kernel/smp.c linux-2.6.32.42/kernel/smp.c
61882 --- linux-2.6.32.42/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
61883 +++ linux-2.6.32.42/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
61884 @@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
61885 }
61886 EXPORT_SYMBOL(smp_call_function);
61887
61888 -void ipi_call_lock(void)
61889 +void ipi_call_lock(void) __acquires(call_function.lock)
61890 {
61891 spin_lock(&call_function.lock);
61892 }
61893
61894 -void ipi_call_unlock(void)
61895 +void ipi_call_unlock(void) __releases(call_function.lock)
61896 {
61897 spin_unlock(&call_function.lock);
61898 }
61899
61900 -void ipi_call_lock_irq(void)
61901 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
61902 {
61903 spin_lock_irq(&call_function.lock);
61904 }
61905
61906 -void ipi_call_unlock_irq(void)
61907 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
61908 {
61909 spin_unlock_irq(&call_function.lock);
61910 }
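The kernel/smp.c changes are annotation-only: __acquires()/__releases() expand to nothing in a normal build and only guide sparse's lock-balance checking. Roughly how the kernel defines them (paraphrased from include/linux/compiler.h), plus a trivially annotated pair:

#include <stdio.h>

#ifdef __CHECKER__                      /* defined only when sparse runs */
# define __acquires(x)  __attribute__((context(x, 0, 1)))
# define __releases(x)  __attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

static int call_function_lock;          /* stand-in for call_function.lock */

static void ipi_call_lock(void) __acquires(call_function_lock);
static void ipi_call_unlock(void) __releases(call_function_lock);

static void ipi_call_lock(void)   { call_function_lock = 1; }
static void ipi_call_unlock(void) { call_function_lock = 0; }

int main(void)
{
        ipi_call_lock();
        puts("holding the lock");
        ipi_call_unlock();
        return 0;
}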
61911 diff -urNp linux-2.6.32.42/kernel/softirq.c linux-2.6.32.42/kernel/softirq.c
61912 --- linux-2.6.32.42/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
61913 +++ linux-2.6.32.42/kernel/softirq.c 2011-04-17 15:56:46.000000000 -0400
61914 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
61915
61916 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
61917
61918 -char *softirq_to_name[NR_SOFTIRQS] = {
61919 +const char * const softirq_to_name[NR_SOFTIRQS] = {
61920 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
61921 "TASKLET", "SCHED", "HRTIMER", "RCU"
61922 };
61923 @@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
61924
61925 asmlinkage void __do_softirq(void)
61926 {
61927 - struct softirq_action *h;
61928 + const struct softirq_action *h;
61929 __u32 pending;
61930 int max_restart = MAX_SOFTIRQ_RESTART;
61931 int cpu;
61932 @@ -233,7 +233,7 @@ restart:
61933 kstat_incr_softirqs_this_cpu(h - softirq_vec);
61934
61935 trace_softirq_entry(h, softirq_vec);
61936 - h->action(h);
61937 + h->action();
61938 trace_softirq_exit(h, softirq_vec);
61939 if (unlikely(prev_count != preempt_count())) {
61940 printk(KERN_ERR "huh, entered softirq %td %s %p"
61941 @@ -363,7 +363,7 @@ void raise_softirq(unsigned int nr)
61942 local_irq_restore(flags);
61943 }
61944
61945 -void open_softirq(int nr, void (*action)(struct softirq_action *))
61946 +void open_softirq(int nr, void (*action)(void))
61947 {
61948 softirq_vec[nr].action = action;
61949 }
61950 @@ -419,7 +419,7 @@ void __tasklet_hi_schedule_first(struct
61951
61952 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
61953
61954 -static void tasklet_action(struct softirq_action *a)
61955 +static void tasklet_action(void)
61956 {
61957 struct tasklet_struct *list;
61958
61959 @@ -454,7 +454,7 @@ static void tasklet_action(struct softir
61960 }
61961 }
61962
61963 -static void tasklet_hi_action(struct softirq_action *a)
61964 +static void tasklet_hi_action(void)
61965 {
61966 struct tasklet_struct *list;
61967
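None of the softirq handlers patched here use their struct softirq_action * argument, so the handler and open_softirq() prototypes are narrowed to void (*)(void) and __do_softirq() simply calls h->action(). The dispatch shape in a standalone sketch (names mimic the kernel's, the bodies are illustrative):

#include <stdio.h>

struct softirq_action {
        void (*action)(void);           /* the new, argument-free prototype */
};

static void run_timer_softirq(void) { puts("TIMER"); }
static void tasklet_action(void)    { puts("TASKLET"); }

static struct softirq_action softirq_vec[2];

static void open_softirq(int nr, void (*action)(void))
{
        softirq_vec[nr].action = action;
}

int main(void)
{
        open_softirq(0, run_timer_softirq);
        open_softirq(1, tasklet_action);

        for (int i = 0; i < 2; i++)
                softirq_vec[i].action();        /* h->action(), as in __do_softirq() */
        return 0;
}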
61968 diff -urNp linux-2.6.32.42/kernel/sys.c linux-2.6.32.42/kernel/sys.c
61969 --- linux-2.6.32.42/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
61970 +++ linux-2.6.32.42/kernel/sys.c 2011-04-17 15:56:46.000000000 -0400
61971 @@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
61972 error = -EACCES;
61973 goto out;
61974 }
61975 +
61976 + if (gr_handle_chroot_setpriority(p, niceval)) {
61977 + error = -EACCES;
61978 + goto out;
61979 + }
61980 +
61981 no_nice = security_task_setnice(p, niceval);
61982 if (no_nice) {
61983 error = no_nice;
61984 @@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
61985 !(user = find_user(who)))
61986 goto out_unlock; /* No processes for this user */
61987
61988 - do_each_thread(g, p)
61989 + do_each_thread(g, p) {
61990 if (__task_cred(p)->uid == who)
61991 error = set_one_prio(p, niceval, error);
61992 - while_each_thread(g, p);
61993 + } while_each_thread(g, p);
61994 if (who != cred->uid)
61995 free_uid(user); /* For find_user() */
61996 break;
61997 @@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
61998 !(user = find_user(who)))
61999 goto out_unlock; /* No processes for this user */
62000
62001 - do_each_thread(g, p)
62002 + do_each_thread(g, p) {
62003 if (__task_cred(p)->uid == who) {
62004 niceval = 20 - task_nice(p);
62005 if (niceval > retval)
62006 retval = niceval;
62007 }
62008 - while_each_thread(g, p);
62009 + } while_each_thread(g, p);
62010 if (who != cred->uid)
62011 free_uid(user); /* for find_user() */
62012 break;
62013 @@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
62014 goto error;
62015 }
62016
62017 + if (gr_check_group_change(new->gid, new->egid, -1))
62018 + goto error;
62019 +
62020 if (rgid != (gid_t) -1 ||
62021 (egid != (gid_t) -1 && egid != old->gid))
62022 new->sgid = new->egid;
62023 @@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
62024 goto error;
62025
62026 retval = -EPERM;
62027 +
62028 + if (gr_check_group_change(gid, gid, gid))
62029 + goto error;
62030 +
62031 if (capable(CAP_SETGID))
62032 new->gid = new->egid = new->sgid = new->fsgid = gid;
62033 else if (gid == old->gid || gid == old->sgid)
62034 @@ -627,6 +640,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
62035 goto error;
62036 }
62037
62038 + if (gr_check_user_change(new->uid, new->euid, -1))
62039 + goto error;
62040 +
62041 if (new->uid != old->uid) {
62042 retval = set_user(new);
62043 if (retval < 0)
62044 @@ -675,6 +691,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
62045 goto error;
62046
62047 retval = -EPERM;
62048 +
62049 + if (gr_check_crash_uid(uid))
62050 + goto error;
62051 + if (gr_check_user_change(uid, uid, uid))
62052 + goto error;
62053 +
62054 if (capable(CAP_SETUID)) {
62055 new->suid = new->uid = uid;
62056 if (uid != old->uid) {
62057 @@ -732,6 +754,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
62058 goto error;
62059 }
62060
62061 + if (gr_check_user_change(ruid, euid, -1))
62062 + goto error;
62063 +
62064 if (ruid != (uid_t) -1) {
62065 new->uid = ruid;
62066 if (ruid != old->uid) {
62067 @@ -800,6 +825,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
62068 goto error;
62069 }
62070
62071 + if (gr_check_group_change(rgid, egid, -1))
62072 + goto error;
62073 +
62074 if (rgid != (gid_t) -1)
62075 new->gid = rgid;
62076 if (egid != (gid_t) -1)
62077 @@ -849,6 +877,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62078 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
62079 goto error;
62080
62081 + if (gr_check_user_change(-1, -1, uid))
62082 + goto error;
62083 +
62084 if (uid == old->uid || uid == old->euid ||
62085 uid == old->suid || uid == old->fsuid ||
62086 capable(CAP_SETUID)) {
62087 @@ -889,6 +920,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
62088 if (gid == old->gid || gid == old->egid ||
62089 gid == old->sgid || gid == old->fsgid ||
62090 capable(CAP_SETGID)) {
62091 + if (gr_check_group_change(-1, -1, gid))
62092 + goto error;
62093 +
62094 if (gid != old_fsgid) {
62095 new->fsgid = gid;
62096 goto change_okay;
62097 @@ -1454,7 +1488,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
62098 error = get_dumpable(me->mm);
62099 break;
62100 case PR_SET_DUMPABLE:
62101 - if (arg2 < 0 || arg2 > 1) {
62102 + if (arg2 > 1) {
62103 error = -EINVAL;
62104 break;
62105 }
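In the PR_SET_DUMPABLE hunk, arg2 is an unsigned long, so the dropped `arg2 < 0` test could never be true; `arg2 > 1` alone rejects every invalid value, including a caller's -1. A two-line demonstration (example values of my own):

#include <stdio.h>

int main(void)
{
        unsigned long arg2 = (unsigned long)-1;        /* what passing -1 actually delivers */

        printf("arg2 < 0 : %d\n", arg2 < 0 ? 1 : 0);   /* always 0; gcc -Wextra flags this  */
        printf("arg2 > 1 : %d\n", arg2 > 1 ? 1 : 0);   /* 1, so -1 is still rejected        */
        return 0;
}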
62106 diff -urNp linux-2.6.32.42/kernel/sysctl.c linux-2.6.32.42/kernel/sysctl.c
62107 --- linux-2.6.32.42/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
62108 +++ linux-2.6.32.42/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
62109 @@ -63,6 +63,13 @@
62110 static int deprecated_sysctl_warning(struct __sysctl_args *args);
62111
62112 #if defined(CONFIG_SYSCTL)
62113 +#include <linux/grsecurity.h>
62114 +#include <linux/grinternal.h>
62115 +
62116 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
62117 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
62118 + const int op);
62119 +extern int gr_handle_chroot_sysctl(const int op);
62120
62121 /* External variables not in a header file. */
62122 extern int C_A_D;
62123 @@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
62124 static int proc_taint(struct ctl_table *table, int write,
62125 void __user *buffer, size_t *lenp, loff_t *ppos);
62126 #endif
62127 +extern ctl_table grsecurity_table[];
62128
62129 static struct ctl_table root_table[];
62130 static struct ctl_table_root sysctl_table_root;
62131 @@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
62132 int sysctl_legacy_va_layout;
62133 #endif
62134
62135 +#ifdef CONFIG_PAX_SOFTMODE
62136 +static ctl_table pax_table[] = {
62137 + {
62138 + .ctl_name = CTL_UNNUMBERED,
62139 + .procname = "softmode",
62140 + .data = &pax_softmode,
62141 + .maxlen = sizeof(unsigned int),
62142 + .mode = 0600,
62143 + .proc_handler = &proc_dointvec,
62144 + },
62145 +
62146 + { .ctl_name = 0 }
62147 +};
62148 +#endif
62149 +
62150 extern int prove_locking;
62151 extern int lock_stat;
62152
62153 @@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
62154 #endif
62155
62156 static struct ctl_table kern_table[] = {
62157 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
62158 + {
62159 + .ctl_name = CTL_UNNUMBERED,
62160 + .procname = "grsecurity",
62161 + .mode = 0500,
62162 + .child = grsecurity_table,
62163 + },
62164 +#endif
62165 +
62166 +#ifdef CONFIG_PAX_SOFTMODE
62167 + {
62168 + .ctl_name = CTL_UNNUMBERED,
62169 + .procname = "pax",
62170 + .mode = 0500,
62171 + .child = pax_table,
62172 + },
62173 +#endif
62174 +
62175 {
62176 .ctl_name = CTL_UNNUMBERED,
62177 .procname = "sched_child_runs_first",
62178 @@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
62179 .data = &modprobe_path,
62180 .maxlen = KMOD_PATH_LEN,
62181 .mode = 0644,
62182 - .proc_handler = &proc_dostring,
62183 - .strategy = &sysctl_string,
62184 + .proc_handler = &proc_dostring_modpriv,
62185 + .strategy = &sysctl_string_modpriv,
62186 },
62187 {
62188 .ctl_name = CTL_UNNUMBERED,
62189 @@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
62190 .mode = 0644,
62191 .proc_handler = &proc_dointvec
62192 },
62193 + {
62194 + .procname = "heap_stack_gap",
62195 + .data = &sysctl_heap_stack_gap,
62196 + .maxlen = sizeof(sysctl_heap_stack_gap),
62197 + .mode = 0644,
62198 + .proc_handler = proc_doulongvec_minmax,
62199 + },
62200 #else
62201 {
62202 .ctl_name = CTL_UNNUMBERED,
62203 @@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
62204 return 0;
62205 }
62206
62207 +static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
62208 +
62209 static int parse_table(int __user *name, int nlen,
62210 void __user *oldval, size_t __user *oldlenp,
62211 void __user *newval, size_t newlen,
62212 @@ -1821,7 +1871,7 @@ repeat:
62213 if (n == table->ctl_name) {
62214 int error;
62215 if (table->child) {
62216 - if (sysctl_perm(root, table, MAY_EXEC))
62217 + if (sysctl_perm_nochk(root, table, MAY_EXEC))
62218 return -EPERM;
62219 name++;
62220 nlen--;
62221 @@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
62222 int error;
62223 int mode;
62224
62225 + if (table->parent != NULL && table->parent->procname != NULL &&
62226 + table->procname != NULL &&
62227 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
62228 + return -EACCES;
62229 + if (gr_handle_chroot_sysctl(op))
62230 + return -EACCES;
62231 + error = gr_handle_sysctl(table, op);
62232 + if (error)
62233 + return error;
62234 +
62235 + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62236 + if (error)
62237 + return error;
62238 +
62239 + if (root->permissions)
62240 + mode = root->permissions(root, current->nsproxy, table);
62241 + else
62242 + mode = table->mode;
62243 +
62244 + return test_perm(mode, op);
62245 +}
62246 +
62247 +int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
62248 +{
62249 + int error;
62250 + int mode;
62251 +
62252 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
62253 if (error)
62254 return error;
62255 @@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
62256 buffer, lenp, ppos);
62257 }
62258
62259 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62260 + void __user *buffer, size_t *lenp, loff_t *ppos)
62261 +{
62262 + if (write && !capable(CAP_SYS_MODULE))
62263 + return -EPERM;
62264 +
62265 + return _proc_do_string(table->data, table->maxlen, write,
62266 + buffer, lenp, ppos);
62267 +}
62268 +
62269
62270 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
62271 int *valp,
62272 @@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
62273 vleft = table->maxlen / sizeof(unsigned long);
62274 left = *lenp;
62275
62276 - for (; left && vleft--; i++, min++, max++, first=0) {
62277 + for (; left && vleft--; i++, first=0) {
62278 if (write) {
62279 while (left) {
62280 char c;
62281 @@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
62282 return -ENOSYS;
62283 }
62284
62285 +int proc_dostring_modpriv(struct ctl_table *table, int write,
62286 + void __user *buffer, size_t *lenp, loff_t *ppos)
62287 +{
62288 + return -ENOSYS;
62289 +}
62290 +
62291 int proc_dointvec(struct ctl_table *table, int write,
62292 void __user *buffer, size_t *lenp, loff_t *ppos)
62293 {
62294 @@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
62295 return 1;
62296 }
62297
62298 +int sysctl_string_modpriv(struct ctl_table *table,
62299 + void __user *oldval, size_t __user *oldlenp,
62300 + void __user *newval, size_t newlen)
62301 +{
62302 + if (newval && newlen && !capable(CAP_SYS_MODULE))
62303 + return -EPERM;
62304 +
62305 + return sysctl_string(table, oldval, oldlenp, newval, newlen);
62306 +}
62307 +
62308 /*
62309 * This function makes sure that all of the integers in the vector
62310 * are between the minimum and maximum values given in the arrays
62311 @@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
62312 return -ENOSYS;
62313 }
62314
62315 +int sysctl_string_modpriv(struct ctl_table *table,
62316 + void __user *oldval, size_t __user *oldlenp,
62317 + void __user *newval, size_t newlen)
62318 +{
62319 + return -ENOSYS;
62320 +}
62321 +
62322 int sysctl_intvec(struct ctl_table *table,
62323 void __user *oldval, size_t __user *oldlenp,
62324 void __user *newval, size_t newlen)
62325 @@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
62326 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
62327 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
62328 EXPORT_SYMBOL(proc_dostring);
62329 +EXPORT_SYMBOL(proc_dostring_modpriv);
62330 EXPORT_SYMBOL(proc_doulongvec_minmax);
62331 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
62332 EXPORT_SYMBOL(register_sysctl_table);
62333 @@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
62334 EXPORT_SYMBOL(sysctl_jiffies);
62335 EXPORT_SYMBOL(sysctl_ms_jiffies);
62336 EXPORT_SYMBOL(sysctl_string);
62337 +EXPORT_SYMBOL(sysctl_string_modpriv);
62338 EXPORT_SYMBOL(sysctl_data);
62339 EXPORT_SYMBOL(unregister_sysctl_table);
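proc_dostring_modpriv() and sysctl_string_modpriv() above are guard-then-delegate wrappers: writes are refused without CAP_SYS_MODULE, everything else falls through to the stock handlers. The same shape in a standalone sketch (the capability state and handler are stand-ins):

#include <stdio.h>
#include <errno.h>

static int have_cap_sys_module;                 /* pretend capability state */

static int proc_dostring(const char *value)     /* stand-in for the stock handler */
{
        printf("modprobe_path = \"%s\"\n", value);
        return 0;
}

static int proc_dostring_modpriv(int write, const char *value)
{
        if (write && !have_cap_sys_module)      /* the added guard */
                return -EPERM;
        return proc_dostring(value);
}

int main(void)
{
        printf("unprivileged write -> %d\n", proc_dostring_modpriv(1, "/sbin/modprobe"));
        have_cap_sys_module = 1;
        printf("privileged write   -> %d\n", proc_dostring_modpriv(1, "/sbin/modprobe"));
        return 0;
}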
62340 diff -urNp linux-2.6.32.42/kernel/sysctl_check.c linux-2.6.32.42/kernel/sysctl_check.c
62341 --- linux-2.6.32.42/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
62342 +++ linux-2.6.32.42/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
62343 @@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
62344 } else {
62345 if ((table->strategy == sysctl_data) ||
62346 (table->strategy == sysctl_string) ||
62347 + (table->strategy == sysctl_string_modpriv) ||
62348 (table->strategy == sysctl_intvec) ||
62349 (table->strategy == sysctl_jiffies) ||
62350 (table->strategy == sysctl_ms_jiffies) ||
62351 (table->proc_handler == proc_dostring) ||
62352 + (table->proc_handler == proc_dostring_modpriv) ||
62353 (table->proc_handler == proc_dointvec) ||
62354 (table->proc_handler == proc_dointvec_minmax) ||
62355 (table->proc_handler == proc_dointvec_jiffies) ||
62356 diff -urNp linux-2.6.32.42/kernel/taskstats.c linux-2.6.32.42/kernel/taskstats.c
62357 --- linux-2.6.32.42/kernel/taskstats.c 2011-03-27 14:31:47.000000000 -0400
62358 +++ linux-2.6.32.42/kernel/taskstats.c 2011-04-17 15:56:46.000000000 -0400
62359 @@ -26,9 +26,12 @@
62360 #include <linux/cgroup.h>
62361 #include <linux/fs.h>
62362 #include <linux/file.h>
62363 +#include <linux/grsecurity.h>
62364 #include <net/genetlink.h>
62365 #include <asm/atomic.h>
62366
62367 +extern int gr_is_taskstats_denied(int pid);
62368 +
62369 /*
62370 * Maximum length of a cpumask that can be specified in
62371 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
62372 @@ -433,6 +436,9 @@ static int taskstats_user_cmd(struct sk_
62373 size_t size;
62374 cpumask_var_t mask;
62375
62376 + if (gr_is_taskstats_denied(current->pid))
62377 + return -EACCES;
62378 +
62379 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
62380 return -ENOMEM;
62381
62382 diff -urNp linux-2.6.32.42/kernel/time/tick-broadcast.c linux-2.6.32.42/kernel/time/tick-broadcast.c
62383 --- linux-2.6.32.42/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
62384 +++ linux-2.6.32.42/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
62385 @@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
62386 * then clear the broadcast bit.
62387 */
62388 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
62389 - int cpu = smp_processor_id();
62390 + cpu = smp_processor_id();
62391
62392 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
62393 tick_broadcast_clear_oneshot(cpu);
62394 diff -urNp linux-2.6.32.42/kernel/time/timekeeping.c linux-2.6.32.42/kernel/time/timekeeping.c
62395 --- linux-2.6.32.42/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
62396 +++ linux-2.6.32.42/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
62397 @@ -14,6 +14,7 @@
62398 #include <linux/init.h>
62399 #include <linux/mm.h>
62400 #include <linux/sched.h>
62401 +#include <linux/grsecurity.h>
62402 #include <linux/sysdev.h>
62403 #include <linux/clocksource.h>
62404 #include <linux/jiffies.h>
62405 @@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
62406 */
62407 struct timespec ts = xtime;
62408 timespec_add_ns(&ts, nsec);
62409 - ACCESS_ONCE(xtime_cache) = ts;
62410 + ACCESS_ONCE_RW(xtime_cache) = ts;
62411 }
62412
62413 /* must hold xtime_lock */
62414 @@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
62415 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
62416 return -EINVAL;
62417
62418 + gr_log_timechange();
62419 +
62420 write_seqlock_irqsave(&xtime_lock, flags);
62421
62422 timekeeping_forward_now();
62423 diff -urNp linux-2.6.32.42/kernel/time/timer_list.c linux-2.6.32.42/kernel/time/timer_list.c
62424 --- linux-2.6.32.42/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
62425 +++ linux-2.6.32.42/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
62426 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
62427
62428 static void print_name_offset(struct seq_file *m, void *sym)
62429 {
62430 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62431 + SEQ_printf(m, "<%p>", NULL);
62432 +#else
62433 char symname[KSYM_NAME_LEN];
62434
62435 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
62436 SEQ_printf(m, "<%p>", sym);
62437 else
62438 SEQ_printf(m, "%s", symname);
62439 +#endif
62440 }
62441
62442 static void
62443 @@ -112,7 +116,11 @@ next_one:
62444 static void
62445 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
62446 {
62447 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62448 + SEQ_printf(m, " .base: %p\n", NULL);
62449 +#else
62450 SEQ_printf(m, " .base: %p\n", base);
62451 +#endif
62452 SEQ_printf(m, " .index: %d\n",
62453 base->index);
62454 SEQ_printf(m, " .resolution: %Lu nsecs\n",
62455 @@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
62456 {
62457 struct proc_dir_entry *pe;
62458
62459 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62460 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
62461 +#else
62462 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
62463 +#endif
62464 if (!pe)
62465 return -ENOMEM;
62466 return 0;
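Under GRKERNSEC_HIDESYM, print_name_offset() and print_base() above stop exposing kernel pointers and symbol names through /proc/timer_list; they print the same format with a NULL argument. The pattern in a userspace sketch (the config option is simulated with a #define):

#include <stdio.h>

#define CONFIG_GRKERNSEC_HIDESYM 1      /* set to 0 to see the real address */

static void print_name_offset(const void *sym)
{
#if CONFIG_GRKERNSEC_HIDESYM
        printf("<%p>", (void *)0);      /* hide the kernel address */
#else
        printf("<%p>", sym);            /* old behaviour: leak it  */
#endif
}

int main(void)
{
        int on_stack;

        print_name_offset(&on_stack);
        putchar('\n');
        return 0;
}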
62467 diff -urNp linux-2.6.32.42/kernel/time/timer_stats.c linux-2.6.32.42/kernel/time/timer_stats.c
62468 --- linux-2.6.32.42/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
62469 +++ linux-2.6.32.42/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
62470 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
62471 static unsigned long nr_entries;
62472 static struct entry entries[MAX_ENTRIES];
62473
62474 -static atomic_t overflow_count;
62475 +static atomic_unchecked_t overflow_count;
62476
62477 /*
62478 * The entries are in a hash-table, for fast lookup:
62479 @@ -140,7 +140,7 @@ static void reset_entries(void)
62480 nr_entries = 0;
62481 memset(entries, 0, sizeof(entries));
62482 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
62483 - atomic_set(&overflow_count, 0);
62484 + atomic_set_unchecked(&overflow_count, 0);
62485 }
62486
62487 static struct entry *alloc_entry(void)
62488 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
62489 if (likely(entry))
62490 entry->count++;
62491 else
62492 - atomic_inc(&overflow_count);
62493 + atomic_inc_unchecked(&overflow_count);
62494
62495 out_unlock:
62496 spin_unlock_irqrestore(lock, flags);
62497 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
62498
62499 static void print_name_offset(struct seq_file *m, unsigned long addr)
62500 {
62501 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62502 + seq_printf(m, "<%p>", NULL);
62503 +#else
62504 char symname[KSYM_NAME_LEN];
62505
62506 if (lookup_symbol_name(addr, symname) < 0)
62507 seq_printf(m, "<%p>", (void *)addr);
62508 else
62509 seq_printf(m, "%s", symname);
62510 +#endif
62511 }
62512
62513 static int tstats_show(struct seq_file *m, void *v)
62514 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
62515
62516 seq_puts(m, "Timer Stats Version: v0.2\n");
62517 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
62518 - if (atomic_read(&overflow_count))
62519 + if (atomic_read_unchecked(&overflow_count))
62520 seq_printf(m, "Overflow: %d entries\n",
62521 - atomic_read(&overflow_count));
62522 + atomic_read_unchecked(&overflow_count));
62523
62524 for (i = 0; i < nr_entries; i++) {
62525 entry = entries + i;
62526 @@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
62527 {
62528 struct proc_dir_entry *pe;
62529
62530 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
62531 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
62532 +#else
62533 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
62534 +#endif
62535 if (!pe)
62536 return -ENOMEM;
62537 return 0;
62538 diff -urNp linux-2.6.32.42/kernel/time.c linux-2.6.32.42/kernel/time.c
62539 --- linux-2.6.32.42/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
62540 +++ linux-2.6.32.42/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
62541 @@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
62542 return error;
62543
62544 if (tz) {
62545 + /* we log in do_settimeofday called below, so don't log twice
62546 + */
62547 + if (!tv)
62548 + gr_log_timechange();
62549 +
62550 /* SMP safe, global irq locking makes it work. */
62551 sys_tz = *tz;
62552 update_vsyscall_tz();
62553 @@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
62554 * Avoid unnecessary multiplications/divisions in the
62555 * two most common HZ cases:
62556 */
62557 -unsigned int inline jiffies_to_msecs(const unsigned long j)
62558 +inline unsigned int jiffies_to_msecs(const unsigned long j)
62559 {
62560 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
62561 return (MSEC_PER_SEC / HZ) * j;
62562 @@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
62563 }
62564 EXPORT_SYMBOL(jiffies_to_msecs);
62565
62566 -unsigned int inline jiffies_to_usecs(const unsigned long j)
62567 +inline unsigned int jiffies_to_usecs(const unsigned long j)
62568 {
62569 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
62570 return (USEC_PER_SEC / HZ) * j;
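The jiffies_to_msecs()/jiffies_to_usecs() hunks only reorder declaration specifiers: `unsigned int inline` is legal C, but gcc's -Wold-style-declaration complains unless `inline` comes first. Side by side (standalone, with static added so the snippet links on its own):

#include <stdio.h>

static unsigned int inline to_msecs_old(unsigned long j) { return (unsigned int)j; } /* warns */
static inline unsigned int to_msecs_new(unsigned long j) { return (unsigned int)j; } /* clean */

int main(void)
{
        printf("%u %u\n", to_msecs_old(42UL), to_msecs_new(42UL));
        return 0;
}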
62571 diff -urNp linux-2.6.32.42/kernel/timer.c linux-2.6.32.42/kernel/timer.c
62572 --- linux-2.6.32.42/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
62573 +++ linux-2.6.32.42/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
62574 @@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
62575 /*
62576 * This function runs timers and the timer-tq in bottom half context.
62577 */
62578 -static void run_timer_softirq(struct softirq_action *h)
62579 +static void run_timer_softirq(void)
62580 {
62581 struct tvec_base *base = __get_cpu_var(tvec_bases);
62582
62583 diff -urNp linux-2.6.32.42/kernel/trace/blktrace.c linux-2.6.32.42/kernel/trace/blktrace.c
62584 --- linux-2.6.32.42/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
62585 +++ linux-2.6.32.42/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
62586 @@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
62587 struct blk_trace *bt = filp->private_data;
62588 char buf[16];
62589
62590 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
62591 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
62592
62593 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
62594 }
62595 @@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
62596 return 1;
62597
62598 bt = buf->chan->private_data;
62599 - atomic_inc(&bt->dropped);
62600 + atomic_inc_unchecked(&bt->dropped);
62601 return 0;
62602 }
62603
62604 @@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
62605
62606 bt->dir = dir;
62607 bt->dev = dev;
62608 - atomic_set(&bt->dropped, 0);
62609 + atomic_set_unchecked(&bt->dropped, 0);
62610
62611 ret = -EIO;
62612 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
62613 diff -urNp linux-2.6.32.42/kernel/trace/ftrace.c linux-2.6.32.42/kernel/trace/ftrace.c
62614 --- linux-2.6.32.42/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
62615 +++ linux-2.6.32.42/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
62616 @@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
62617
62618 ip = rec->ip;
62619
62620 + ret = ftrace_arch_code_modify_prepare();
62621 + FTRACE_WARN_ON(ret);
62622 + if (ret)
62623 + return 0;
62624 +
62625 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
62626 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
62627 if (ret) {
62628 ftrace_bug(ret, ip);
62629 rec->flags |= FTRACE_FL_FAILED;
62630 - return 0;
62631 }
62632 - return 1;
62633 + return ret ? 0 : 1;
62634 }
62635
62636 /*
62637 diff -urNp linux-2.6.32.42/kernel/trace/ring_buffer.c linux-2.6.32.42/kernel/trace/ring_buffer.c
62638 --- linux-2.6.32.42/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
62639 +++ linux-2.6.32.42/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
62640 @@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
62641 * the reader page). But if the next page is a header page,
62642 * its flags will be non zero.
62643 */
62644 -static int inline
62645 +static inline int
62646 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
62647 struct buffer_page *page, struct list_head *list)
62648 {
62649 diff -urNp linux-2.6.32.42/kernel/trace/trace.c linux-2.6.32.42/kernel/trace/trace.c
62650 --- linux-2.6.32.42/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
62651 +++ linux-2.6.32.42/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
62652 @@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
62653 size_t rem;
62654 unsigned int i;
62655
62656 + pax_track_stack();
62657 +
62658 /* copy the tracer to avoid using a global lock all around */
62659 mutex_lock(&trace_types_lock);
62660 if (unlikely(old_tracer != current_trace && current_trace)) {
62661 @@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
62662 int entries, size, i;
62663 size_t ret;
62664
62665 + pax_track_stack();
62666 +
62667 if (*ppos & (PAGE_SIZE - 1)) {
62668 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
62669 return -EINVAL;
62670 @@ -3816,10 +3820,9 @@ static const struct file_operations trac
62671 };
62672 #endif
62673
62674 -static struct dentry *d_tracer;
62675 -
62676 struct dentry *tracing_init_dentry(void)
62677 {
62678 + static struct dentry *d_tracer;
62679 static int once;
62680
62681 if (d_tracer)
62682 @@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
62683 return d_tracer;
62684 }
62685
62686 -static struct dentry *d_percpu;
62687 -
62688 struct dentry *tracing_dentry_percpu(void)
62689 {
62690 + static struct dentry *d_percpu;
62691 static int once;
62692 struct dentry *d_tracer;
62693
62694 diff -urNp linux-2.6.32.42/kernel/trace/trace_events.c linux-2.6.32.42/kernel/trace/trace_events.c
62695 --- linux-2.6.32.42/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
62696 +++ linux-2.6.32.42/kernel/trace/trace_events.c 2011-04-17 15:56:46.000000000 -0400
62697 @@ -951,6 +951,8 @@ static LIST_HEAD(ftrace_module_file_list
62698 * Modules must own their file_operations to keep up with
62699 * reference counting.
62700 */
62701 +
62702 +/* cannot be const */
62703 struct ftrace_module_file_ops {
62704 struct list_head list;
62705 struct module *mod;
62706 diff -urNp linux-2.6.32.42/kernel/trace/trace_mmiotrace.c linux-2.6.32.42/kernel/trace/trace_mmiotrace.c
62707 --- linux-2.6.32.42/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
62708 +++ linux-2.6.32.42/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
62709 @@ -23,7 +23,7 @@ struct header_iter {
62710 static struct trace_array *mmio_trace_array;
62711 static bool overrun_detected;
62712 static unsigned long prev_overruns;
62713 -static atomic_t dropped_count;
62714 +static atomic_unchecked_t dropped_count;
62715
62716 static void mmio_reset_data(struct trace_array *tr)
62717 {
62718 @@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
62719
62720 static unsigned long count_overruns(struct trace_iterator *iter)
62721 {
62722 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
62723 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
62724 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
62725
62726 if (over > prev_overruns)
62727 @@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
62728 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
62729 sizeof(*entry), 0, pc);
62730 if (!event) {
62731 - atomic_inc(&dropped_count);
62732 + atomic_inc_unchecked(&dropped_count);
62733 return;
62734 }
62735 entry = ring_buffer_event_data(event);
62736 @@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
62737 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
62738 sizeof(*entry), 0, pc);
62739 if (!event) {
62740 - atomic_inc(&dropped_count);
62741 + atomic_inc_unchecked(&dropped_count);
62742 return;
62743 }
62744 entry = ring_buffer_event_data(event);
62745 diff -urNp linux-2.6.32.42/kernel/trace/trace_output.c linux-2.6.32.42/kernel/trace/trace_output.c
62746 --- linux-2.6.32.42/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
62747 +++ linux-2.6.32.42/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
62748 @@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
62749 return 0;
62750 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
62751 if (!IS_ERR(p)) {
62752 - p = mangle_path(s->buffer + s->len, p, "\n");
62753 + p = mangle_path(s->buffer + s->len, p, "\n\\");
62754 if (p) {
62755 s->len = p - s->buffer;
62756 return 1;
62757 diff -urNp linux-2.6.32.42/kernel/trace/trace_stack.c linux-2.6.32.42/kernel/trace/trace_stack.c
62758 --- linux-2.6.32.42/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
62759 +++ linux-2.6.32.42/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
62760 @@ -50,7 +50,7 @@ static inline void check_stack(void)
62761 return;
62762
62763 /* we do not handle interrupt stacks yet */
62764 - if (!object_is_on_stack(&this_size))
62765 + if (!object_starts_on_stack(&this_size))
62766 return;
62767
62768 local_irq_save(flags);
62769 diff -urNp linux-2.6.32.42/kernel/trace/trace_workqueue.c linux-2.6.32.42/kernel/trace/trace_workqueue.c
62770 --- linux-2.6.32.42/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
62771 +++ linux-2.6.32.42/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
62772 @@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
62773 int cpu;
62774 pid_t pid;
62775 /* Can be inserted from interrupt or user context, need to be atomic */
62776 - atomic_t inserted;
62777 + atomic_unchecked_t inserted;
62778 /*
62779 * Don't need to be atomic, works are serialized in a single workqueue thread
62780 * on a single CPU.
62781 @@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
62782 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
62783 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
62784 if (node->pid == wq_thread->pid) {
62785 - atomic_inc(&node->inserted);
62786 + atomic_inc_unchecked(&node->inserted);
62787 goto found;
62788 }
62789 }
62790 @@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
62791 tsk = get_pid_task(pid, PIDTYPE_PID);
62792 if (tsk) {
62793 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
62794 - atomic_read(&cws->inserted), cws->executed,
62795 + atomic_read_unchecked(&cws->inserted), cws->executed,
62796 tsk->comm);
62797 put_task_struct(tsk);
62798 }
62799 diff -urNp linux-2.6.32.42/kernel/user.c linux-2.6.32.42/kernel/user.c
62800 --- linux-2.6.32.42/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
62801 +++ linux-2.6.32.42/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
62802 @@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
62803 spin_lock_irq(&uidhash_lock);
62804 up = uid_hash_find(uid, hashent);
62805 if (up) {
62806 + put_user_ns(ns);
62807 key_put(new->uid_keyring);
62808 key_put(new->session_keyring);
62809 kmem_cache_free(uid_cachep, new);
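The single added put_user_ns() fixes a reference leak in alloc_uid(): when uid_hash_find() shows another thread already inserted the same uid, the namespace reference taken for the never-used new entry is now dropped along with it. The acquire-then-release-on-losing-the-race pattern, with refcounting modelled as a plain counter:

#include <stdio.h>

struct user_namespace { int count; };

static void get_user_ns(struct user_namespace *ns) { ns->count++; }
static void put_user_ns(struct user_namespace *ns) { ns->count--; }

static void alloc_uid_sketch(struct user_namespace *ns, int already_hashed)
{
        get_user_ns(ns);                /* reference held by the entry we prepared */

        if (already_hashed) {           /* someone else won the race */
                put_user_ns(ns);        /* the fix: release it before freeing      */
                return;
        }
        /* otherwise the inserted entry keeps the reference */
}

int main(void)
{
        struct user_namespace ns = { .count = 1 };

        alloc_uid_sketch(&ns, 1);
        printf("refcount after losing the race: %d (expected 1)\n", ns.count);
        return 0;
}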
62810 diff -urNp linux-2.6.32.42/lib/bug.c linux-2.6.32.42/lib/bug.c
62811 --- linux-2.6.32.42/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
62812 +++ linux-2.6.32.42/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
62813 @@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
62814 return BUG_TRAP_TYPE_NONE;
62815
62816 bug = find_bug(bugaddr);
62817 + if (!bug)
62818 + return BUG_TRAP_TYPE_NONE;
62819
62820 printk(KERN_EMERG "------------[ cut here ]------------\n");
62821
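The lib/bug.c guard makes report_bug() give up immediately when the trapping address has no entry in the bug table, instead of printing the "cut here" banner and treating an unknown trap as a known BUG(). Early return on a failed lookup, in isolation (types and lookup are stand-ins):

#include <stdio.h>
#include <stddef.h>

struct bug_entry { const char *file; unsigned line; };

static const struct bug_entry *find_bug(unsigned long bugaddr)
{
        (void)bugaddr;
        return NULL;                    /* pretend the address is not in any bug table */
}

int main(void)
{
        const struct bug_entry *bug = find_bug(0xdeadbeefUL);

        if (!bug) {                     /* the added check */
                puts("not ours: no report");
                return 0;
        }
        printf("BUG at %s:%u\n", bug->file, bug->line);
        return 0;
}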
62822 diff -urNp linux-2.6.32.42/lib/debugobjects.c linux-2.6.32.42/lib/debugobjects.c
62823 --- linux-2.6.32.42/lib/debugobjects.c 2011-03-27 14:31:47.000000000 -0400
62824 +++ linux-2.6.32.42/lib/debugobjects.c 2011-04-17 15:56:46.000000000 -0400
62825 @@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
62826 if (limit > 4)
62827 return;
62828
62829 - is_on_stack = object_is_on_stack(addr);
62830 + is_on_stack = object_starts_on_stack(addr);
62831 if (is_on_stack == onstack)
62832 return;
62833
62834 diff -urNp linux-2.6.32.42/lib/dma-debug.c linux-2.6.32.42/lib/dma-debug.c
62835 --- linux-2.6.32.42/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
62836 +++ linux-2.6.32.42/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
62837 @@ -861,7 +861,7 @@ out:
62838
62839 static void check_for_stack(struct device *dev, void *addr)
62840 {
62841 - if (object_is_on_stack(addr))
62842 + if (object_starts_on_stack(addr))
62843 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
62844 "stack [addr=%p]\n", addr);
62845 }
62846 diff -urNp linux-2.6.32.42/lib/idr.c linux-2.6.32.42/lib/idr.c
62847 --- linux-2.6.32.42/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
62848 +++ linux-2.6.32.42/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
62849 @@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
62850 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
62851
62852 /* if already at the top layer, we need to grow */
62853 - if (id >= 1 << (idp->layers * IDR_BITS)) {
62854 + if (id >= (1 << (idp->layers * IDR_BITS))) {
62855 *starting_id = id;
62856 return IDR_NEED_TO_GROW;
62857 }
62858 diff -urNp linux-2.6.32.42/lib/inflate.c linux-2.6.32.42/lib/inflate.c
62859 --- linux-2.6.32.42/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
62860 +++ linux-2.6.32.42/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
62861 @@ -266,7 +266,7 @@ static void free(void *where)
62862 malloc_ptr = free_mem_ptr;
62863 }
62864 #else
62865 -#define malloc(a) kmalloc(a, GFP_KERNEL)
62866 +#define malloc(a) kmalloc((a), GFP_KERNEL)
62867 #define free(a) kfree(a)
62868 #endif
62869
62870 diff -urNp linux-2.6.32.42/lib/Kconfig.debug linux-2.6.32.42/lib/Kconfig.debug
62871 --- linux-2.6.32.42/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
62872 +++ linux-2.6.32.42/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
62873 @@ -905,7 +905,7 @@ config LATENCYTOP
62874 select STACKTRACE
62875 select SCHEDSTATS
62876 select SCHED_DEBUG
62877 - depends on HAVE_LATENCYTOP_SUPPORT
62878 + depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
62879 help
62880 Enable this option if you want to use the LatencyTOP tool
62881 to find out which userspace is blocking on what kernel operations.
62882 diff -urNp linux-2.6.32.42/lib/kobject.c linux-2.6.32.42/lib/kobject.c
62883 --- linux-2.6.32.42/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
62884 +++ linux-2.6.32.42/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
62885 @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
62886 return ret;
62887 }
62888
62889 -struct sysfs_ops kobj_sysfs_ops = {
62890 +const struct sysfs_ops kobj_sysfs_ops = {
62891 .show = kobj_attr_show,
62892 .store = kobj_attr_store,
62893 };
62894 @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
62895 * If the kset was not able to be created, NULL will be returned.
62896 */
62897 static struct kset *kset_create(const char *name,
62898 - struct kset_uevent_ops *uevent_ops,
62899 + const struct kset_uevent_ops *uevent_ops,
62900 struct kobject *parent_kobj)
62901 {
62902 struct kset *kset;
62903 @@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
62904 * If the kset was not able to be created, NULL will be returned.
62905 */
62906 struct kset *kset_create_and_add(const char *name,
62907 - struct kset_uevent_ops *uevent_ops,
62908 + const struct kset_uevent_ops *uevent_ops,
62909 struct kobject *parent_kobj)
62910 {
62911 struct kset *kset;
62912 diff -urNp linux-2.6.32.42/lib/kobject_uevent.c linux-2.6.32.42/lib/kobject_uevent.c
62913 --- linux-2.6.32.42/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
62914 +++ linux-2.6.32.42/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
62915 @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
62916 const char *subsystem;
62917 struct kobject *top_kobj;
62918 struct kset *kset;
62919 - struct kset_uevent_ops *uevent_ops;
62920 + const struct kset_uevent_ops *uevent_ops;
62921 u64 seq;
62922 int i = 0;
62923 int retval = 0;
62924 diff -urNp linux-2.6.32.42/lib/kref.c linux-2.6.32.42/lib/kref.c
62925 --- linux-2.6.32.42/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
62926 +++ linux-2.6.32.42/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
62927 @@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
62928 */
62929 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
62930 {
62931 - WARN_ON(release == NULL);
62932 + BUG_ON(release == NULL);
62933 WARN_ON(release == (void (*)(struct kref *))kfree);
62934
62935 if (atomic_dec_and_test(&kref->refcount)) {
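With the stricter check a caller that passes a NULL release function now stops in BUG_ON() at the first kref_put() instead of continuing past a warning. A minimal correct caller, assuming a hypothetical struct foo:

        struct foo {
                struct kref ref;
                /* payload */
        };

        static void foo_release(struct kref *kref)
        {
                struct foo *f = container_of(kref, struct foo, ref);

                kfree(f);
        }

        static void foo_put(struct foo *f)
        {
                /* passing NULL here would now BUG() rather than WARN() */
                kref_put(&f->ref, foo_release);
        }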
62936 diff -urNp linux-2.6.32.42/lib/parser.c linux-2.6.32.42/lib/parser.c
62937 --- linux-2.6.32.42/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
62938 +++ linux-2.6.32.42/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
62939 @@ -126,7 +126,7 @@ static int match_number(substring_t *s,
62940 char *buf;
62941 int ret;
62942
62943 - buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
62944 + buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
62945 if (!buf)
62946 return -ENOMEM;
62947 memcpy(buf, s->from, s->to - s->from);
62948 diff -urNp linux-2.6.32.42/lib/radix-tree.c linux-2.6.32.42/lib/radix-tree.c
62949 --- linux-2.6.32.42/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
62950 +++ linux-2.6.32.42/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
62951 @@ -81,7 +81,7 @@ struct radix_tree_preload {
62952 int nr;
62953 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
62954 };
62955 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
62956 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
62957
62958 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
62959 {
62960 diff -urNp linux-2.6.32.42/lib/random32.c linux-2.6.32.42/lib/random32.c
62961 --- linux-2.6.32.42/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
62962 +++ linux-2.6.32.42/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
62963 @@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
62964 */
62965 static inline u32 __seed(u32 x, u32 m)
62966 {
62967 - return (x < m) ? x + m : x;
62968 + return (x <= m) ? x + m + 1 : x;
62969 }
62970
62971 /**
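The old clamp left x unchanged when x == m, so a boundary value could become a state word that is not strictly above the generator's per-word minimum; the new expression always returns something greater than m. A standalone comparison of the two forms:

        /* illustrative only, plain unsigned int instead of the kernel's u32 */
        static unsigned int seed_old(unsigned int x, unsigned int m)
        {
                return (x < m) ? x + m : x;
        }

        static unsigned int seed_new(unsigned int x, unsigned int m)
        {
                return (x <= m) ? x + m + 1 : x;
        }

        /* seed_old(2, 2) == 2  -- the boundary value passes through unchanged
         * seed_new(2, 2) == 5  -- the result is always strictly greater than m */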
62972 diff -urNp linux-2.6.32.42/lib/vsprintf.c linux-2.6.32.42/lib/vsprintf.c
62973 --- linux-2.6.32.42/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
62974 +++ linux-2.6.32.42/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
62975 @@ -16,6 +16,9 @@
62976 * - scnprintf and vscnprintf
62977 */
62978
62979 +#ifdef CONFIG_GRKERNSEC_HIDESYM
62980 +#define __INCLUDED_BY_HIDESYM 1
62981 +#endif
62982 #include <stdarg.h>
62983 #include <linux/module.h>
62984 #include <linux/types.h>
62985 @@ -546,12 +549,12 @@ static char *number(char *buf, char *end
62986 return buf;
62987 }
62988
62989 -static char *string(char *buf, char *end, char *s, struct printf_spec spec)
62990 +static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
62991 {
62992 int len, i;
62993
62994 if ((unsigned long)s < PAGE_SIZE)
62995 - s = "<NULL>";
62996 + s = "(null)";
62997
62998 len = strnlen(s, spec.precision);
62999
63000 @@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
63001 unsigned long value = (unsigned long) ptr;
63002 #ifdef CONFIG_KALLSYMS
63003 char sym[KSYM_SYMBOL_LEN];
63004 - if (ext != 'f' && ext != 's')
63005 + if (ext != 'f' && ext != 's' && ext != 'a')
63006 sprint_symbol(sym, value);
63007 else
63008 kallsyms_lookup(value, NULL, NULL, NULL, sym);
63009 @@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
63010 * - 'f' For simple symbolic function names without offset
63011 * - 'S' For symbolic direct pointers with offset
63012 * - 's' For symbolic direct pointers without offset
63013 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
63014 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
63015 * - 'R' For a struct resource pointer, it prints the range of
63016 * addresses (not the name nor the flags)
63017 * - 'M' For a 6-byte MAC address, it prints the address in the
63018 @@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
63019 struct printf_spec spec)
63020 {
63021 if (!ptr)
63022 - return string(buf, end, "(null)", spec);
63023 + return string(buf, end, "(nil)", spec);
63024
63025 switch (*fmt) {
63026 case 'F':
63027 @@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
63028 case 's':
63029 /* Fallthrough */
63030 case 'S':
63031 +#ifdef CONFIG_GRKERNSEC_HIDESYM
63032 + break;
63033 +#else
63034 + return symbol_string(buf, end, ptr, spec, *fmt);
63035 +#endif
63036 + case 'a':
63037 + /* Fallthrough */
63038 + case 'A':
63039 return symbol_string(buf, end, ptr, spec, *fmt);
63040 case 'R':
63041 return resource_string(buf, end, ptr, spec);
63042 @@ -1445,7 +1458,7 @@ do { \
63043 size_t len;
63044 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
63045 || (unsigned long)save_str < PAGE_SIZE)
63046 - save_str = "<NULL>";
63047 + save_str = "(null)";
63048 len = strlen(save_str);
63049 if (str + len + 1 < end)
63050 memcpy(str, save_str, len + 1);
63051 @@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
63052 typeof(type) value; \
63053 if (sizeof(type) == 8) { \
63054 args = PTR_ALIGN(args, sizeof(u32)); \
63055 - *(u32 *)&value = *(u32 *)args; \
63056 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
63057 + *(u32 *)&value = *(const u32 *)args; \
63058 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
63059 } else { \
63060 args = PTR_ALIGN(args, sizeof(type)); \
63061 - value = *(typeof(type) *)args; \
63062 + value = *(const typeof(type) *)args; \
63063 } \
63064 args += sizeof(type); \
63065 value; \
63066 @@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
63067 const char *str_arg = args;
63068 size_t len = strlen(str_arg);
63069 args += len + 1;
63070 - str = string(str, end, (char *)str_arg, spec);
63071 + str = string(str, end, str_arg, spec);
63072 break;
63073 }
63074
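Taken together, the vsprintf changes gate the ordinary %pS/%ps symbol specifiers behind GRKERNSEC_HIDESYM and add %pA/%pa as the spellings explicitly approved to keep resolving symbols in that configuration. A sketch of a call site opting in (the surrounding context is hypothetical):

        void *caller = __builtin_return_address(0);

        /* under GRKERNSEC_HIDESYM '%pS' no longer resolves the symbol;
         * '%pA' is the variant approved to keep doing so */
        printk(KERN_INFO "entered from %pA\n", caller);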
63075 diff -urNp linux-2.6.32.42/localversion-grsec linux-2.6.32.42/localversion-grsec
63076 --- linux-2.6.32.42/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
63077 +++ linux-2.6.32.42/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
63078 @@ -0,0 +1 @@
63079 +-grsec
63080 diff -urNp linux-2.6.32.42/Makefile linux-2.6.32.42/Makefile
63081 --- linux-2.6.32.42/Makefile 2011-06-25 12:55:34.000000000 -0400
63082 +++ linux-2.6.32.42/Makefile 2011-06-25 12:56:37.000000000 -0400
63083 @@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
63084
63085 HOSTCC = gcc
63086 HOSTCXX = g++
63087 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
63088 -HOSTCXXFLAGS = -O2
63089 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
63090 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
63091 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
63092
63093 # Decide whether to build built-in, modular, or both.
63094 # Normally, just do built-in.
63095 @@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
63096 KBUILD_CPPFLAGS := -D__KERNEL__
63097
63098 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
63099 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
63100 -fno-strict-aliasing -fno-common \
63101 -Werror-implicit-function-declaration \
63102 -Wno-format-security \
63103 -fno-delete-null-pointer-checks
63104 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
63105 KBUILD_AFLAGS := -D__ASSEMBLY__
63106
63107 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
63108 @@ -403,7 +406,7 @@ endif
63109 # of make so .config is not included in this case either (for *config).
63110
63111 no-dot-config-targets := clean mrproper distclean \
63112 - cscope TAGS tags help %docs check% \
63113 + cscope gtags TAGS tags help %docs check% \
63114 include/linux/version.h headers_% \
63115 kernelrelease kernelversion
63116
63117 @@ -644,7 +647,7 @@ export mod_strip_cmd
63118
63119
63120 ifeq ($(KBUILD_EXTMOD),)
63121 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
63122 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
63123
63124 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
63125 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
63126 @@ -949,7 +952,19 @@ include/config/kernel.release: include/c
63127 # version.h and scripts_basic is processed / created.
63128
63129 # Listed in dependency order
63130 -PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3
63131 +PHONY += prepare archprepare prepare0 prepare1 prepare2 prepare3 pax-plugin
63132 +
63133 +ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
63134 +KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0405, -fplugin=$(objtree)/tools/gcc/pax_plugin.so -fplugin-arg-pax_plugin-track-lowest-sp=100)
63135 +endif
63136 +pax-plugin:
63137 +ifneq (,$(findstring pax_plugin, $(KBUILD_CFLAGS)))
63138 + $(Q)$(MAKE) $(build)=tools/gcc
63139 +else
63140 +ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
63141 + $(Q)echo "warning, your gcc does not support plugins, PAX_MEMORY_STACKLEAK will be less secure"
63142 +endif
63143 +endif
63144
63145 # prepare3 is used to check if we are building in a separate output directory,
63146 # and if so do:
63147 @@ -970,7 +985,7 @@ ifneq ($(KBUILD_SRC),)
63148 endif
63149
63150 # prepare2 creates a makefile if using a separate output directory
63151 -prepare2: prepare3 outputmakefile
63152 +prepare2: prepare3 outputmakefile pax-plugin
63153
63154 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
63155 include/asm include/config/auto.conf
63156 @@ -1198,7 +1213,7 @@ MRPROPER_FILES += .config .config.old in
63157 include/linux/autoconf.h include/linux/version.h \
63158 include/linux/utsrelease.h \
63159 include/linux/bounds.h include/asm*/asm-offsets.h \
63160 - Module.symvers Module.markers tags TAGS cscope*
63161 + Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
63162
63163 # clean - Delete most, but leave enough to build external modules
63164 #
63165 @@ -1289,6 +1304,7 @@ help:
63166 @echo ' modules_prepare - Set up for building external modules'
63167 @echo ' tags/TAGS - Generate tags file for editors'
63168 @echo ' cscope - Generate cscope index'
63169 + @echo ' gtags - Generate GNU GLOBAL index'
63170 @echo ' kernelrelease - Output the release version string'
63171 @echo ' kernelversion - Output the version stored in Makefile'
63172 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
63173 @@ -1445,7 +1461,7 @@ endif # KBUILD_EXTMOD
63174 quiet_cmd_tags = GEN $@
63175 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
63176
63177 -tags TAGS cscope: FORCE
63178 +tags TAGS cscope gtags: FORCE
63179 $(call cmd,tags)
63180
63181 # Scripts to check various things for consistency
63182 diff -urNp linux-2.6.32.42/mm/backing-dev.c linux-2.6.32.42/mm/backing-dev.c
63183 --- linux-2.6.32.42/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
63184 +++ linux-2.6.32.42/mm/backing-dev.c 2011-05-04 17:56:28.000000000 -0400
63185 @@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
63186 * Add the default flusher task that gets created for any bdi
63187 * that has dirty data pending writeout
63188 */
63189 -void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63190 +static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63191 {
63192 if (!bdi_cap_writeback_dirty(bdi))
63193 return;
63194 diff -urNp linux-2.6.32.42/mm/filemap.c linux-2.6.32.42/mm/filemap.c
63195 --- linux-2.6.32.42/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
63196 +++ linux-2.6.32.42/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
63197 @@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
63198 struct address_space *mapping = file->f_mapping;
63199
63200 if (!mapping->a_ops->readpage)
63201 - return -ENOEXEC;
63202 + return -ENODEV;
63203 file_accessed(file);
63204 vma->vm_ops = &generic_file_vm_ops;
63205 vma->vm_flags |= VM_CAN_NONLINEAR;
63206 @@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
63207 *pos = i_size_read(inode);
63208
63209 if (limit != RLIM_INFINITY) {
63210 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
63211 if (*pos >= limit) {
63212 send_sig(SIGXFSZ, current, 0);
63213 return -EFBIG;
63214 diff -urNp linux-2.6.32.42/mm/fremap.c linux-2.6.32.42/mm/fremap.c
63215 --- linux-2.6.32.42/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
63216 +++ linux-2.6.32.42/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
63217 @@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63218 retry:
63219 vma = find_vma(mm, start);
63220
63221 +#ifdef CONFIG_PAX_SEGMEXEC
63222 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
63223 + goto out;
63224 +#endif
63225 +
63226 /*
63227 * Make sure the vma is shared, that it supports prefaulting,
63228 * and that the remapped range is valid and fully within
63229 @@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
63230 /*
63231 * drop PG_Mlocked flag for over-mapped range
63232 */
63233 - unsigned int saved_flags = vma->vm_flags;
63234 + unsigned long saved_flags = vma->vm_flags;
63235 munlock_vma_pages_range(vma, start, start + size);
63236 vma->vm_flags = saved_flags;
63237 }
63238 diff -urNp linux-2.6.32.42/mm/highmem.c linux-2.6.32.42/mm/highmem.c
63239 --- linux-2.6.32.42/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
63240 +++ linux-2.6.32.42/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
63241 @@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
63242 * So no dangers, even with speculative execution.
63243 */
63244 page = pte_page(pkmap_page_table[i]);
63245 + pax_open_kernel();
63246 pte_clear(&init_mm, (unsigned long)page_address(page),
63247 &pkmap_page_table[i]);
63248 -
63249 + pax_close_kernel();
63250 set_page_address(page, NULL);
63251 need_flush = 1;
63252 }
63253 @@ -177,9 +178,11 @@ start:
63254 }
63255 }
63256 vaddr = PKMAP_ADDR(last_pkmap_nr);
63257 +
63258 + pax_open_kernel();
63259 set_pte_at(&init_mm, vaddr,
63260 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
63261 -
63262 + pax_close_kernel();
63263 pkmap_count[last_pkmap_nr] = 1;
63264 set_page_address(page, (void *)vaddr);
63265
63266 diff -urNp linux-2.6.32.42/mm/hugetlb.c linux-2.6.32.42/mm/hugetlb.c
63267 --- linux-2.6.32.42/mm/hugetlb.c 2011-06-25 12:55:35.000000000 -0400
63268 +++ linux-2.6.32.42/mm/hugetlb.c 2011-06-25 12:56:37.000000000 -0400
63269 @@ -1925,6 +1925,26 @@ static int unmap_ref_private(struct mm_s
63270 return 1;
63271 }
63272
63273 +#ifdef CONFIG_PAX_SEGMEXEC
63274 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
63275 +{
63276 + struct mm_struct *mm = vma->vm_mm;
63277 + struct vm_area_struct *vma_m;
63278 + unsigned long address_m;
63279 + pte_t *ptep_m;
63280 +
63281 + vma_m = pax_find_mirror_vma(vma);
63282 + if (!vma_m)
63283 + return;
63284 +
63285 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63286 + address_m = address + SEGMEXEC_TASK_SIZE;
63287 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
63288 + get_page(page_m);
63289 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
63290 +}
63291 +#endif
63292 +
63293 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
63294 unsigned long address, pte_t *ptep, pte_t pte,
63295 struct page *pagecache_page)
63296 @@ -1996,6 +2016,11 @@ retry_avoidcopy:
63297 huge_ptep_clear_flush(vma, address, ptep);
63298 set_huge_pte_at(mm, address, ptep,
63299 make_huge_pte(vma, new_page, 1));
63300 +
63301 +#ifdef CONFIG_PAX_SEGMEXEC
63302 + pax_mirror_huge_pte(vma, address, new_page);
63303 +#endif
63304 +
63305 /* Make the old page be freed below */
63306 new_page = old_page;
63307 }
63308 @@ -2127,6 +2152,10 @@ retry:
63309 && (vma->vm_flags & VM_SHARED)));
63310 set_huge_pte_at(mm, address, ptep, new_pte);
63311
63312 +#ifdef CONFIG_PAX_SEGMEXEC
63313 + pax_mirror_huge_pte(vma, address, page);
63314 +#endif
63315 +
63316 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
63317 /* Optimization, do the COW without a second fault */
63318 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
63319 @@ -2155,6 +2184,28 @@ int hugetlb_fault(struct mm_struct *mm,
63320 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
63321 struct hstate *h = hstate_vma(vma);
63322
63323 +#ifdef CONFIG_PAX_SEGMEXEC
63324 + struct vm_area_struct *vma_m;
63325 +
63326 + vma_m = pax_find_mirror_vma(vma);
63327 + if (vma_m) {
63328 + unsigned long address_m;
63329 +
63330 + if (vma->vm_start > vma_m->vm_start) {
63331 + address_m = address;
63332 + address -= SEGMEXEC_TASK_SIZE;
63333 + vma = vma_m;
63334 + h = hstate_vma(vma);
63335 + } else
63336 + address_m = address + SEGMEXEC_TASK_SIZE;
63337 +
63338 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
63339 + return VM_FAULT_OOM;
63340 + address_m &= HPAGE_MASK;
63341 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
63342 + }
63343 +#endif
63344 +
63345 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
63346 if (!ptep)
63347 return VM_FAULT_OOM;
63348 diff -urNp linux-2.6.32.42/mm/Kconfig linux-2.6.32.42/mm/Kconfig
63349 --- linux-2.6.32.42/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
63350 +++ linux-2.6.32.42/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
63351 @@ -228,7 +228,7 @@ config KSM
63352 config DEFAULT_MMAP_MIN_ADDR
63353 int "Low address space to protect from user allocation"
63354 depends on MMU
63355 - default 4096
63356 + default 65536
63357 help
63358 This is the portion of low virtual memory which should be protected
63359 from userspace allocation. Keeping a user from writing to low pages
63360 diff -urNp linux-2.6.32.42/mm/kmemleak.c linux-2.6.32.42/mm/kmemleak.c
63361 --- linux-2.6.32.42/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
63362 +++ linux-2.6.32.42/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
63363 @@ -358,7 +358,7 @@ static void print_unreferenced(struct se
63364
63365 for (i = 0; i < object->trace_len; i++) {
63366 void *ptr = (void *)object->trace[i];
63367 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
63368 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
63369 }
63370 }
63371
63372 diff -urNp linux-2.6.32.42/mm/ksm.c linux-2.6.32.42/mm/ksm.c
63373 --- linux-2.6.32.42/mm/ksm.c 2011-03-27 14:31:47.000000000 -0400
63374 +++ linux-2.6.32.42/mm/ksm.c 2011-06-20 19:38:36.000000000 -0400
63375 @@ -1215,6 +1215,12 @@ static struct rmap_item *scan_get_next_r
63376 slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
63377 ksm_scan.mm_slot = slot;
63378 spin_unlock(&ksm_mmlist_lock);
63379 + /*
63380 + * Although we tested list_empty() above, a racing __ksm_exit
63381 + * of the last mm on the list may have removed it since then.
63382 + */
63383 + if (slot == &ksm_mm_head)
63384 + return NULL;
63385 next_mm:
63386 ksm_scan.address = 0;
63387 ksm_scan.rmap_item = list_entry(&slot->rmap_list,
63388 diff -urNp linux-2.6.32.42/mm/maccess.c linux-2.6.32.42/mm/maccess.c
63389 --- linux-2.6.32.42/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
63390 +++ linux-2.6.32.42/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
63391 @@ -14,7 +14,7 @@
63392 * Safely read from address @src to the buffer at @dst. If a kernel fault
63393 * happens, handle that and return -EFAULT.
63394 */
63395 -long probe_kernel_read(void *dst, void *src, size_t size)
63396 +long probe_kernel_read(void *dst, const void *src, size_t size)
63397 {
63398 long ret;
63399 mm_segment_t old_fs = get_fs();
63400 @@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
63401 * Safely write to address @dst from the buffer at @src. If a kernel fault
63402 * happens, handle that and return -EFAULT.
63403 */
63404 -long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
63405 +long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
63406 {
63407 long ret;
63408 mm_segment_t old_fs = get_fs();
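The only change here is const-qualifying the source pointer, which lets callers hand in read-only addresses without casting. A minimal caller written against the new prototype (peek_word() is hypothetical):

        static int peek_word(const void *kaddr, unsigned long *out)
        {
                unsigned long val;

                /* a fault during the copy is caught and reported as -EFAULT */
                if (probe_kernel_read(&val, kaddr, sizeof(val)))
                        return -EFAULT;

                *out = val;
                return 0;
        }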
63409 diff -urNp linux-2.6.32.42/mm/madvise.c linux-2.6.32.42/mm/madvise.c
63410 --- linux-2.6.32.42/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
63411 +++ linux-2.6.32.42/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
63412 @@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
63413 pgoff_t pgoff;
63414 unsigned long new_flags = vma->vm_flags;
63415
63416 +#ifdef CONFIG_PAX_SEGMEXEC
63417 + struct vm_area_struct *vma_m;
63418 +#endif
63419 +
63420 switch (behavior) {
63421 case MADV_NORMAL:
63422 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
63423 @@ -103,6 +107,13 @@ success:
63424 /*
63425 * vm_flags is protected by the mmap_sem held in write mode.
63426 */
63427 +
63428 +#ifdef CONFIG_PAX_SEGMEXEC
63429 + vma_m = pax_find_mirror_vma(vma);
63430 + if (vma_m)
63431 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
63432 +#endif
63433 +
63434 vma->vm_flags = new_flags;
63435
63436 out:
63437 @@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
63438 struct vm_area_struct ** prev,
63439 unsigned long start, unsigned long end)
63440 {
63441 +
63442 +#ifdef CONFIG_PAX_SEGMEXEC
63443 + struct vm_area_struct *vma_m;
63444 +#endif
63445 +
63446 *prev = vma;
63447 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
63448 return -EINVAL;
63449 @@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
63450 zap_page_range(vma, start, end - start, &details);
63451 } else
63452 zap_page_range(vma, start, end - start, NULL);
63453 +
63454 +#ifdef CONFIG_PAX_SEGMEXEC
63455 + vma_m = pax_find_mirror_vma(vma);
63456 + if (vma_m) {
63457 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
63458 + struct zap_details details = {
63459 + .nonlinear_vma = vma_m,
63460 + .last_index = ULONG_MAX,
63461 + };
63462 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
63463 + } else
63464 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
63465 + }
63466 +#endif
63467 +
63468 return 0;
63469 }
63470
63471 @@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
63472 if (end < start)
63473 goto out;
63474
63475 +#ifdef CONFIG_PAX_SEGMEXEC
63476 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63477 + if (end > SEGMEXEC_TASK_SIZE)
63478 + goto out;
63479 + } else
63480 +#endif
63481 +
63482 + if (end > TASK_SIZE)
63483 + goto out;
63484 +
63485 error = 0;
63486 if (end == start)
63487 goto out;
63488 diff -urNp linux-2.6.32.42/mm/memory.c linux-2.6.32.42/mm/memory.c
63489 --- linux-2.6.32.42/mm/memory.c 2011-03-27 14:31:47.000000000 -0400
63490 +++ linux-2.6.32.42/mm/memory.c 2011-04-17 15:56:46.000000000 -0400
63491 @@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
63492 return;
63493
63494 pmd = pmd_offset(pud, start);
63495 +
63496 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
63497 pud_clear(pud);
63498 pmd_free_tlb(tlb, pmd, start);
63499 +#endif
63500 +
63501 }
63502
63503 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
63504 @@ -219,9 +223,12 @@ static inline void free_pud_range(struct
63505 if (end - 1 > ceiling - 1)
63506 return;
63507
63508 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
63509 pud = pud_offset(pgd, start);
63510 pgd_clear(pgd);
63511 pud_free_tlb(tlb, pud, start);
63512 +#endif
63513 +
63514 }
63515
63516 /*
63517 @@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
63518 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
63519 i = 0;
63520
63521 - do {
63522 + while (nr_pages) {
63523 struct vm_area_struct *vma;
63524
63525 - vma = find_extend_vma(mm, start);
63526 + vma = find_vma(mm, start);
63527 if (!vma && in_gate_area(tsk, start)) {
63528 unsigned long pg = start & PAGE_MASK;
63529 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
63530 @@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
63531 continue;
63532 }
63533
63534 - if (!vma ||
63535 + if (!vma || start < vma->vm_start ||
63536 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
63537 !(vm_flags & vma->vm_flags))
63538 return i ? : -EFAULT;
63539 @@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
63540 start += PAGE_SIZE;
63541 nr_pages--;
63542 } while (nr_pages && start < vma->vm_end);
63543 - } while (nr_pages);
63544 + }
63545 return i;
63546 }
63547
63548 @@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
63549 page_add_file_rmap(page);
63550 set_pte_at(mm, addr, pte, mk_pte(page, prot));
63551
63552 +#ifdef CONFIG_PAX_SEGMEXEC
63553 + pax_mirror_file_pte(vma, addr, page, ptl);
63554 +#endif
63555 +
63556 retval = 0;
63557 pte_unmap_unlock(pte, ptl);
63558 return retval;
63559 @@ -1560,10 +1571,22 @@ out:
63560 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
63561 struct page *page)
63562 {
63563 +
63564 +#ifdef CONFIG_PAX_SEGMEXEC
63565 + struct vm_area_struct *vma_m;
63566 +#endif
63567 +
63568 if (addr < vma->vm_start || addr >= vma->vm_end)
63569 return -EFAULT;
63570 if (!page_count(page))
63571 return -EINVAL;
63572 +
63573 +#ifdef CONFIG_PAX_SEGMEXEC
63574 + vma_m = pax_find_mirror_vma(vma);
63575 + if (vma_m)
63576 + vma_m->vm_flags |= VM_INSERTPAGE;
63577 +#endif
63578 +
63579 vma->vm_flags |= VM_INSERTPAGE;
63580 return insert_page(vma, addr, page, vma->vm_page_prot);
63581 }
63582 @@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
63583 unsigned long pfn)
63584 {
63585 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
63586 + BUG_ON(vma->vm_mirror);
63587
63588 if (addr < vma->vm_start || addr >= vma->vm_end)
63589 return -EFAULT;
63590 @@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
63591 copy_user_highpage(dst, src, va, vma);
63592 }
63593
63594 +#ifdef CONFIG_PAX_SEGMEXEC
63595 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
63596 +{
63597 + struct mm_struct *mm = vma->vm_mm;
63598 + spinlock_t *ptl;
63599 + pte_t *pte, entry;
63600 +
63601 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
63602 + entry = *pte;
63603 + if (!pte_present(entry)) {
63604 + if (!pte_none(entry)) {
63605 + BUG_ON(pte_file(entry));
63606 + free_swap_and_cache(pte_to_swp_entry(entry));
63607 + pte_clear_not_present_full(mm, address, pte, 0);
63608 + }
63609 + } else {
63610 + struct page *page;
63611 +
63612 + flush_cache_page(vma, address, pte_pfn(entry));
63613 + entry = ptep_clear_flush(vma, address, pte);
63614 + BUG_ON(pte_dirty(entry));
63615 + page = vm_normal_page(vma, address, entry);
63616 + if (page) {
63617 + update_hiwater_rss(mm);
63618 + if (PageAnon(page))
63619 + dec_mm_counter(mm, anon_rss);
63620 + else
63621 + dec_mm_counter(mm, file_rss);
63622 + page_remove_rmap(page);
63623 + page_cache_release(page);
63624 + }
63625 + }
63626 + pte_unmap_unlock(pte, ptl);
63627 +}
63628 +
63629 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
63630 + *
63631 + * the ptl of the lower mapped page is held on entry and is not released on exit
63632 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
63633 + */
63634 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63635 +{
63636 + struct mm_struct *mm = vma->vm_mm;
63637 + unsigned long address_m;
63638 + spinlock_t *ptl_m;
63639 + struct vm_area_struct *vma_m;
63640 + pmd_t *pmd_m;
63641 + pte_t *pte_m, entry_m;
63642 +
63643 + BUG_ON(!page_m || !PageAnon(page_m));
63644 +
63645 + vma_m = pax_find_mirror_vma(vma);
63646 + if (!vma_m)
63647 + return;
63648 +
63649 + BUG_ON(!PageLocked(page_m));
63650 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63651 + address_m = address + SEGMEXEC_TASK_SIZE;
63652 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63653 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63654 + ptl_m = pte_lockptr(mm, pmd_m);
63655 + if (ptl != ptl_m) {
63656 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63657 + if (!pte_none(*pte_m))
63658 + goto out;
63659 + }
63660 +
63661 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63662 + page_cache_get(page_m);
63663 + page_add_anon_rmap(page_m, vma_m, address_m);
63664 + inc_mm_counter(mm, anon_rss);
63665 + set_pte_at(mm, address_m, pte_m, entry_m);
63666 + update_mmu_cache(vma_m, address_m, entry_m);
63667 +out:
63668 + if (ptl != ptl_m)
63669 + spin_unlock(ptl_m);
63670 + pte_unmap_nested(pte_m);
63671 + unlock_page(page_m);
63672 +}
63673 +
63674 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
63675 +{
63676 + struct mm_struct *mm = vma->vm_mm;
63677 + unsigned long address_m;
63678 + spinlock_t *ptl_m;
63679 + struct vm_area_struct *vma_m;
63680 + pmd_t *pmd_m;
63681 + pte_t *pte_m, entry_m;
63682 +
63683 + BUG_ON(!page_m || PageAnon(page_m));
63684 +
63685 + vma_m = pax_find_mirror_vma(vma);
63686 + if (!vma_m)
63687 + return;
63688 +
63689 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63690 + address_m = address + SEGMEXEC_TASK_SIZE;
63691 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63692 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63693 + ptl_m = pte_lockptr(mm, pmd_m);
63694 + if (ptl != ptl_m) {
63695 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63696 + if (!pte_none(*pte_m))
63697 + goto out;
63698 + }
63699 +
63700 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
63701 + page_cache_get(page_m);
63702 + page_add_file_rmap(page_m);
63703 + inc_mm_counter(mm, file_rss);
63704 + set_pte_at(mm, address_m, pte_m, entry_m);
63705 + update_mmu_cache(vma_m, address_m, entry_m);
63706 +out:
63707 + if (ptl != ptl_m)
63708 + spin_unlock(ptl_m);
63709 + pte_unmap_nested(pte_m);
63710 +}
63711 +
63712 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
63713 +{
63714 + struct mm_struct *mm = vma->vm_mm;
63715 + unsigned long address_m;
63716 + spinlock_t *ptl_m;
63717 + struct vm_area_struct *vma_m;
63718 + pmd_t *pmd_m;
63719 + pte_t *pte_m, entry_m;
63720 +
63721 + vma_m = pax_find_mirror_vma(vma);
63722 + if (!vma_m)
63723 + return;
63724 +
63725 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
63726 + address_m = address + SEGMEXEC_TASK_SIZE;
63727 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
63728 + pte_m = pte_offset_map_nested(pmd_m, address_m);
63729 + ptl_m = pte_lockptr(mm, pmd_m);
63730 + if (ptl != ptl_m) {
63731 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
63732 + if (!pte_none(*pte_m))
63733 + goto out;
63734 + }
63735 +
63736 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
63737 + set_pte_at(mm, address_m, pte_m, entry_m);
63738 +out:
63739 + if (ptl != ptl_m)
63740 + spin_unlock(ptl_m);
63741 + pte_unmap_nested(pte_m);
63742 +}
63743 +
63744 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
63745 +{
63746 + struct page *page_m;
63747 + pte_t entry;
63748 +
63749 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
63750 + goto out;
63751 +
63752 + entry = *pte;
63753 + page_m = vm_normal_page(vma, address, entry);
63754 + if (!page_m)
63755 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
63756 + else if (PageAnon(page_m)) {
63757 + if (pax_find_mirror_vma(vma)) {
63758 + pte_unmap_unlock(pte, ptl);
63759 + lock_page(page_m);
63760 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
63761 + if (pte_same(entry, *pte))
63762 + pax_mirror_anon_pte(vma, address, page_m, ptl);
63763 + else
63764 + unlock_page(page_m);
63765 + }
63766 + } else
63767 + pax_mirror_file_pte(vma, address, page_m, ptl);
63768 +
63769 +out:
63770 + pte_unmap_unlock(pte, ptl);
63771 +}
63772 +#endif
63773 +
63774 /*
63775 * This routine handles present pages, when users try to write
63776 * to a shared page. It is done by copying the page to a new address
63777 @@ -2156,6 +2360,12 @@ gotten:
63778 */
63779 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63780 if (likely(pte_same(*page_table, orig_pte))) {
63781 +
63782 +#ifdef CONFIG_PAX_SEGMEXEC
63783 + if (pax_find_mirror_vma(vma))
63784 + BUG_ON(!trylock_page(new_page));
63785 +#endif
63786 +
63787 if (old_page) {
63788 if (!PageAnon(old_page)) {
63789 dec_mm_counter(mm, file_rss);
63790 @@ -2207,6 +2417,10 @@ gotten:
63791 page_remove_rmap(old_page);
63792 }
63793
63794 +#ifdef CONFIG_PAX_SEGMEXEC
63795 + pax_mirror_anon_pte(vma, address, new_page, ptl);
63796 +#endif
63797 +
63798 /* Free the old page.. */
63799 new_page = old_page;
63800 ret |= VM_FAULT_WRITE;
63801 @@ -2604,6 +2818,11 @@ static int do_swap_page(struct mm_struct
63802 swap_free(entry);
63803 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
63804 try_to_free_swap(page);
63805 +
63806 +#ifdef CONFIG_PAX_SEGMEXEC
63807 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
63808 +#endif
63809 +
63810 unlock_page(page);
63811
63812 if (flags & FAULT_FLAG_WRITE) {
63813 @@ -2615,6 +2834,11 @@ static int do_swap_page(struct mm_struct
63814
63815 /* No need to invalidate - it was non-present before */
63816 update_mmu_cache(vma, address, pte);
63817 +
63818 +#ifdef CONFIG_PAX_SEGMEXEC
63819 + pax_mirror_anon_pte(vma, address, page, ptl);
63820 +#endif
63821 +
63822 unlock:
63823 pte_unmap_unlock(page_table, ptl);
63824 out:
63825 @@ -2630,40 +2854,6 @@ out_release:
63826 }
63827
63828 /*
63829 - * This is like a special single-page "expand_{down|up}wards()",
63830 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
63831 - * doesn't hit another vma.
63832 - */
63833 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
63834 -{
63835 - address &= PAGE_MASK;
63836 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
63837 - struct vm_area_struct *prev = vma->vm_prev;
63838 -
63839 - /*
63840 - * Is there a mapping abutting this one below?
63841 - *
63842 - * That's only ok if it's the same stack mapping
63843 - * that has gotten split..
63844 - */
63845 - if (prev && prev->vm_end == address)
63846 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
63847 -
63848 - expand_stack(vma, address - PAGE_SIZE);
63849 - }
63850 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
63851 - struct vm_area_struct *next = vma->vm_next;
63852 -
63853 - /* As VM_GROWSDOWN but s/below/above/ */
63854 - if (next && next->vm_start == address + PAGE_SIZE)
63855 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
63856 -
63857 - expand_upwards(vma, address + PAGE_SIZE);
63858 - }
63859 - return 0;
63860 -}
63861 -
63862 -/*
63863 * We enter with non-exclusive mmap_sem (to exclude vma changes,
63864 * but allow concurrent faults), and pte mapped but not yet locked.
63865 * We return with mmap_sem still held, but pte unmapped and unlocked.
63866 @@ -2672,27 +2862,23 @@ static int do_anonymous_page(struct mm_s
63867 unsigned long address, pte_t *page_table, pmd_t *pmd,
63868 unsigned int flags)
63869 {
63870 - struct page *page;
63871 + struct page *page = NULL;
63872 spinlock_t *ptl;
63873 pte_t entry;
63874
63875 - pte_unmap(page_table);
63876 -
63877 - /* Check if we need to add a guard page to the stack */
63878 - if (check_stack_guard_page(vma, address) < 0)
63879 - return VM_FAULT_SIGBUS;
63880 -
63881 - /* Use the zero-page for reads */
63882 if (!(flags & FAULT_FLAG_WRITE)) {
63883 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
63884 vma->vm_page_prot));
63885 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
63886 + ptl = pte_lockptr(mm, pmd);
63887 + spin_lock(ptl);
63888 if (!pte_none(*page_table))
63889 goto unlock;
63890 goto setpte;
63891 }
63892
63893 /* Allocate our own private page. */
63894 + pte_unmap(page_table);
63895 +
63896 if (unlikely(anon_vma_prepare(vma)))
63897 goto oom;
63898 page = alloc_zeroed_user_highpage_movable(vma, address);
63899 @@ -2711,6 +2897,11 @@ static int do_anonymous_page(struct mm_s
63900 if (!pte_none(*page_table))
63901 goto release;
63902
63903 +#ifdef CONFIG_PAX_SEGMEXEC
63904 + if (pax_find_mirror_vma(vma))
63905 + BUG_ON(!trylock_page(page));
63906 +#endif
63907 +
63908 inc_mm_counter(mm, anon_rss);
63909 page_add_new_anon_rmap(page, vma, address);
63910 setpte:
63911 @@ -2718,6 +2909,12 @@ setpte:
63912
63913 /* No need to invalidate - it was non-present before */
63914 update_mmu_cache(vma, address, entry);
63915 +
63916 +#ifdef CONFIG_PAX_SEGMEXEC
63917 + if (page)
63918 + pax_mirror_anon_pte(vma, address, page, ptl);
63919 +#endif
63920 +
63921 unlock:
63922 pte_unmap_unlock(page_table, ptl);
63923 return 0;
63924 @@ -2860,6 +3057,12 @@ static int __do_fault(struct mm_struct *
63925 */
63926 /* Only go through if we didn't race with anybody else... */
63927 if (likely(pte_same(*page_table, orig_pte))) {
63928 +
63929 +#ifdef CONFIG_PAX_SEGMEXEC
63930 + if (anon && pax_find_mirror_vma(vma))
63931 + BUG_ON(!trylock_page(page));
63932 +#endif
63933 +
63934 flush_icache_page(vma, page);
63935 entry = mk_pte(page, vma->vm_page_prot);
63936 if (flags & FAULT_FLAG_WRITE)
63937 @@ -2879,6 +3082,14 @@ static int __do_fault(struct mm_struct *
63938
63939 /* no need to invalidate: a not-present page won't be cached */
63940 update_mmu_cache(vma, address, entry);
63941 +
63942 +#ifdef CONFIG_PAX_SEGMEXEC
63943 + if (anon)
63944 + pax_mirror_anon_pte(vma, address, page, ptl);
63945 + else
63946 + pax_mirror_file_pte(vma, address, page, ptl);
63947 +#endif
63948 +
63949 } else {
63950 if (charged)
63951 mem_cgroup_uncharge_page(page);
63952 @@ -3026,6 +3237,12 @@ static inline int handle_pte_fault(struc
63953 if (flags & FAULT_FLAG_WRITE)
63954 flush_tlb_page(vma, address);
63955 }
63956 +
63957 +#ifdef CONFIG_PAX_SEGMEXEC
63958 + pax_mirror_pte(vma, address, pte, pmd, ptl);
63959 + return 0;
63960 +#endif
63961 +
63962 unlock:
63963 pte_unmap_unlock(pte, ptl);
63964 return 0;
63965 @@ -3042,6 +3259,10 @@ int handle_mm_fault(struct mm_struct *mm
63966 pmd_t *pmd;
63967 pte_t *pte;
63968
63969 +#ifdef CONFIG_PAX_SEGMEXEC
63970 + struct vm_area_struct *vma_m;
63971 +#endif
63972 +
63973 __set_current_state(TASK_RUNNING);
63974
63975 count_vm_event(PGFAULT);
63976 @@ -3049,6 +3270,34 @@ int handle_mm_fault(struct mm_struct *mm
63977 if (unlikely(is_vm_hugetlb_page(vma)))
63978 return hugetlb_fault(mm, vma, address, flags);
63979
63980 +#ifdef CONFIG_PAX_SEGMEXEC
63981 + vma_m = pax_find_mirror_vma(vma);
63982 + if (vma_m) {
63983 + unsigned long address_m;
63984 + pgd_t *pgd_m;
63985 + pud_t *pud_m;
63986 + pmd_t *pmd_m;
63987 +
63988 + if (vma->vm_start > vma_m->vm_start) {
63989 + address_m = address;
63990 + address -= SEGMEXEC_TASK_SIZE;
63991 + vma = vma_m;
63992 + } else
63993 + address_m = address + SEGMEXEC_TASK_SIZE;
63994 +
63995 + pgd_m = pgd_offset(mm, address_m);
63996 + pud_m = pud_alloc(mm, pgd_m, address_m);
63997 + if (!pud_m)
63998 + return VM_FAULT_OOM;
63999 + pmd_m = pmd_alloc(mm, pud_m, address_m);
64000 + if (!pmd_m)
64001 + return VM_FAULT_OOM;
64002 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
64003 + return VM_FAULT_OOM;
64004 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
64005 + }
64006 +#endif
64007 +
64008 pgd = pgd_offset(mm, address);
64009 pud = pud_alloc(mm, pgd, address);
64010 if (!pud)
64011 @@ -3146,7 +3395,7 @@ static int __init gate_vma_init(void)
64012 gate_vma.vm_start = FIXADDR_USER_START;
64013 gate_vma.vm_end = FIXADDR_USER_END;
64014 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
64015 - gate_vma.vm_page_prot = __P101;
64016 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
64017 /*
64018 * Make sure the vDSO gets into every core dump.
64019 * Dumping its contents makes post-mortem fully interpretable later
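Most of the memory.c additions serve a single invariant: under SEGMEXEC the executable view of a mapping sits SEGMEXEC_TASK_SIZE above the data view, so a PTE established in the lower half has to be replayed in the mirror before the fault returns. A compressed restatement of that invariant using only helpers the patch itself introduces (not a literal excerpt; locking and error handling omitted):

        struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);

        if (vma_m) {
                unsigned long address_m = address + SEGMEXEC_TASK_SIZE;

                /* the matching pax_mirror_{anon,file,pfn}_pte() helper repeats
                 * the PTE update at address_m, taking the mirror's PTE lock
                 * when it differs from the one already held */
        }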
64020 diff -urNp linux-2.6.32.42/mm/memory-failure.c linux-2.6.32.42/mm/memory-failure.c
64021 --- linux-2.6.32.42/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
64022 +++ linux-2.6.32.42/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
64023 @@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
64024
64025 int sysctl_memory_failure_recovery __read_mostly = 1;
64026
64027 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64028 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64029
64030 /*
64031 * Send all the processes who have the page mapped an ``action optional''
64032 @@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
64033 return 0;
64034 }
64035
64036 - atomic_long_add(1, &mce_bad_pages);
64037 + atomic_long_add_unchecked(1, &mce_bad_pages);
64038
64039 /*
64040 * We need/can do nothing about count=0 pages.
64041 diff -urNp linux-2.6.32.42/mm/mempolicy.c linux-2.6.32.42/mm/mempolicy.c
64042 --- linux-2.6.32.42/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
64043 +++ linux-2.6.32.42/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
64044 @@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
64045 struct vm_area_struct *next;
64046 int err;
64047
64048 +#ifdef CONFIG_PAX_SEGMEXEC
64049 + struct vm_area_struct *vma_m;
64050 +#endif
64051 +
64052 err = 0;
64053 for (; vma && vma->vm_start < end; vma = next) {
64054 next = vma->vm_next;
64055 @@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
64056 err = policy_vma(vma, new);
64057 if (err)
64058 break;
64059 +
64060 +#ifdef CONFIG_PAX_SEGMEXEC
64061 + vma_m = pax_find_mirror_vma(vma);
64062 + if (vma_m) {
64063 + err = policy_vma(vma_m, new);
64064 + if (err)
64065 + break;
64066 + }
64067 +#endif
64068 +
64069 }
64070 return err;
64071 }
64072 @@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
64073
64074 if (end < start)
64075 return -EINVAL;
64076 +
64077 +#ifdef CONFIG_PAX_SEGMEXEC
64078 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
64079 + if (end > SEGMEXEC_TASK_SIZE)
64080 + return -EINVAL;
64081 + } else
64082 +#endif
64083 +
64084 + if (end > TASK_SIZE)
64085 + return -EINVAL;
64086 +
64087 if (end == start)
64088 return 0;
64089
64090 @@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64091 if (!mm)
64092 return -EINVAL;
64093
64094 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64095 + if (mm != current->mm &&
64096 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64097 + err = -EPERM;
64098 + goto out;
64099 + }
64100 +#endif
64101 +
64102 /*
64103 * Check if this process has the right to modify the specified
64104 * process. The right exists if the process has administrative
64105 @@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64106 rcu_read_lock();
64107 tcred = __task_cred(task);
64108 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64109 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
64110 - !capable(CAP_SYS_NICE)) {
64111 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64112 rcu_read_unlock();
64113 err = -EPERM;
64114 goto out;
64115 @@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
64116
64117 if (file) {
64118 seq_printf(m, " file=");
64119 - seq_path(m, &file->f_path, "\n\t= ");
64120 + seq_path(m, &file->f_path, "\n\t\\= ");
64121 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
64122 seq_printf(m, " heap");
64123 } else if (vma->vm_start <= mm->start_stack &&
64124 diff -urNp linux-2.6.32.42/mm/migrate.c linux-2.6.32.42/mm/migrate.c
64125 --- linux-2.6.32.42/mm/migrate.c 2011-03-27 14:31:47.000000000 -0400
64126 +++ linux-2.6.32.42/mm/migrate.c 2011-05-16 21:46:57.000000000 -0400
64127 @@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
64128 unsigned long chunk_start;
64129 int err;
64130
64131 + pax_track_stack();
64132 +
64133 task_nodes = cpuset_mems_allowed(task);
64134
64135 err = -ENOMEM;
64136 @@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64137 if (!mm)
64138 return -EINVAL;
64139
64140 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64141 + if (mm != current->mm &&
64142 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64143 + err = -EPERM;
64144 + goto out;
64145 + }
64146 +#endif
64147 +
64148 /*
64149 * Check if this process has the right to modify the specified
64150 * process. The right exists if the process has administrative
64151 @@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64152 rcu_read_lock();
64153 tcred = __task_cred(task);
64154 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64155 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
64156 - !capable(CAP_SYS_NICE)) {
64157 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64158 rcu_read_unlock();
64159 err = -EPERM;
64160 goto out;
64161 diff -urNp linux-2.6.32.42/mm/mlock.c linux-2.6.32.42/mm/mlock.c
64162 --- linux-2.6.32.42/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
64163 +++ linux-2.6.32.42/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
64164 @@ -13,6 +13,7 @@
64165 #include <linux/pagemap.h>
64166 #include <linux/mempolicy.h>
64167 #include <linux/syscalls.h>
64168 +#include <linux/security.h>
64169 #include <linux/sched.h>
64170 #include <linux/module.h>
64171 #include <linux/rmap.h>
64172 @@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
64173 }
64174 }
64175
64176 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
64177 -{
64178 - return (vma->vm_flags & VM_GROWSDOWN) &&
64179 - (vma->vm_start == addr) &&
64180 - !vma_stack_continue(vma->vm_prev, addr);
64181 -}
64182 -
64183 /**
64184 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
64185 * @vma: target vma
64186 @@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
64187 if (vma->vm_flags & VM_WRITE)
64188 gup_flags |= FOLL_WRITE;
64189
64190 - /* We don't try to access the guard page of a stack vma */
64191 - if (stack_guard_page(vma, start)) {
64192 - addr += PAGE_SIZE;
64193 - nr_pages--;
64194 - }
64195 -
64196 while (nr_pages > 0) {
64197 int i;
64198
64199 @@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
64200 {
64201 unsigned long nstart, end, tmp;
64202 struct vm_area_struct * vma, * prev;
64203 - int error;
64204 + int error = -EINVAL;
64205
64206 len = PAGE_ALIGN(len);
64207 end = start + len;
64208 @@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
64209 return -EINVAL;
64210 if (end == start)
64211 return 0;
64212 + if (end > TASK_SIZE)
64213 + return -EINVAL;
64214 +
64215 vma = find_vma_prev(current->mm, start, &prev);
64216 if (!vma || vma->vm_start > start)
64217 return -ENOMEM;
64218 @@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
64219 for (nstart = start ; ; ) {
64220 unsigned int newflags;
64221
64222 +#ifdef CONFIG_PAX_SEGMEXEC
64223 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64224 + break;
64225 +#endif
64226 +
64227 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
64228
64229 newflags = vma->vm_flags | VM_LOCKED;
64230 @@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
64231 lock_limit >>= PAGE_SHIFT;
64232
64233 /* check against resource limits */
64234 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
64235 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
64236 error = do_mlock(start, len, 1);
64237 up_write(&current->mm->mmap_sem);
64238 @@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
64239 static int do_mlockall(int flags)
64240 {
64241 struct vm_area_struct * vma, * prev = NULL;
64242 - unsigned int def_flags = 0;
64243
64244 if (flags & MCL_FUTURE)
64245 - def_flags = VM_LOCKED;
64246 - current->mm->def_flags = def_flags;
64247 + current->mm->def_flags |= VM_LOCKED;
64248 + else
64249 + current->mm->def_flags &= ~VM_LOCKED;
64250 if (flags == MCL_FUTURE)
64251 goto out;
64252
64253 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
64254 - unsigned int newflags;
64255 + unsigned long newflags;
64256 +
64257 +#ifdef CONFIG_PAX_SEGMEXEC
64258 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
64259 + break;
64260 +#endif
64261
64262 + BUG_ON(vma->vm_end > TASK_SIZE);
64263 newflags = vma->vm_flags | VM_LOCKED;
64264 if (!(flags & MCL_CURRENT))
64265 newflags &= ~VM_LOCKED;
64266 @@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
64267 lock_limit >>= PAGE_SHIFT;
64268
64269 ret = -ENOMEM;
64270 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
64271 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
64272 capable(CAP_IPC_LOCK))
64273 ret = do_mlockall(flags);
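After this hunk MCL_FUTURE only ORs VM_LOCKED into mm->def_flags (and clears it again when the flag is absent) rather than overwriting the whole mask, and the per-VMA flags are carried in an unsigned long. The userspace side is unchanged; a minimal example of the call these paths service:

        #include <sys/mman.h>

        /* lock all current and future mappings of the calling process */
        static int lock_all_memory(void)
        {
                return mlockall(MCL_CURRENT | MCL_FUTURE);
        }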
64274 diff -urNp linux-2.6.32.42/mm/mmap.c linux-2.6.32.42/mm/mmap.c
64275 --- linux-2.6.32.42/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
64276 +++ linux-2.6.32.42/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
64277 @@ -45,6 +45,16 @@
64278 #define arch_rebalance_pgtables(addr, len) (addr)
64279 #endif
64280
64281 +static inline void verify_mm_writelocked(struct mm_struct *mm)
64282 +{
64283 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
64284 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
64285 + up_read(&mm->mmap_sem);
64286 + BUG();
64287 + }
64288 +#endif
64289 +}
64290 +
64291 static void unmap_region(struct mm_struct *mm,
64292 struct vm_area_struct *vma, struct vm_area_struct *prev,
64293 unsigned long start, unsigned long end);
64294 @@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
64295 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
64296 *
64297 */
64298 -pgprot_t protection_map[16] = {
64299 +pgprot_t protection_map[16] __read_only = {
64300 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
64301 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
64302 };
64303
64304 pgprot_t vm_get_page_prot(unsigned long vm_flags)
64305 {
64306 - return __pgprot(pgprot_val(protection_map[vm_flags &
64307 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
64308 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
64309 pgprot_val(arch_vm_get_page_prot(vm_flags)));
64310 +
64311 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64312 + if (!nx_enabled &&
64313 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
64314 + (vm_flags & (VM_READ | VM_WRITE)))
64315 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
64316 +#endif
64317 +
64318 + return prot;
64319 }
64320 EXPORT_SYMBOL(vm_get_page_prot);
64321
64322 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
64323 int sysctl_overcommit_ratio = 50; /* default is 50% */
64324 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
64325 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
64326 struct percpu_counter vm_committed_as;
64327
64328 /*
64329 @@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
64330 struct vm_area_struct *next = vma->vm_next;
64331
64332 might_sleep();
64333 + BUG_ON(vma->vm_mirror);
64334 if (vma->vm_ops && vma->vm_ops->close)
64335 vma->vm_ops->close(vma);
64336 if (vma->vm_file) {
64337 @@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
64338 * not page aligned -Ram Gupta
64339 */
64340 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
64341 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
64342 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
64343 (mm->end_data - mm->start_data) > rlim)
64344 goto out;
64345 @@ -704,6 +726,12 @@ static int
64346 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
64347 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64348 {
64349 +
64350 +#ifdef CONFIG_PAX_SEGMEXEC
64351 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
64352 + return 0;
64353 +#endif
64354 +
64355 if (is_mergeable_vma(vma, file, vm_flags) &&
64356 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
64357 if (vma->vm_pgoff == vm_pgoff)
64358 @@ -723,6 +751,12 @@ static int
64359 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
64360 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
64361 {
64362 +
64363 +#ifdef CONFIG_PAX_SEGMEXEC
64364 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
64365 + return 0;
64366 +#endif
64367 +
64368 if (is_mergeable_vma(vma, file, vm_flags) &&
64369 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
64370 pgoff_t vm_pglen;
64371 @@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
64372 struct vm_area_struct *vma_merge(struct mm_struct *mm,
64373 struct vm_area_struct *prev, unsigned long addr,
64374 unsigned long end, unsigned long vm_flags,
64375 - struct anon_vma *anon_vma, struct file *file,
64376 + struct anon_vma *anon_vma, struct file *file,
64377 pgoff_t pgoff, struct mempolicy *policy)
64378 {
64379 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
64380 struct vm_area_struct *area, *next;
64381
64382 +#ifdef CONFIG_PAX_SEGMEXEC
64383 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
64384 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
64385 +
64386 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
64387 +#endif
64388 +
64389 /*
64390 * We later require that vma->vm_flags == vm_flags,
64391 * so this tests vma->vm_flags & VM_SPECIAL, too.
64392 @@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
64393 if (next && next->vm_end == end) /* cases 6, 7, 8 */
64394 next = next->vm_next;
64395
64396 +#ifdef CONFIG_PAX_SEGMEXEC
64397 + if (prev)
64398 + prev_m = pax_find_mirror_vma(prev);
64399 + if (area)
64400 + area_m = pax_find_mirror_vma(area);
64401 + if (next)
64402 + next_m = pax_find_mirror_vma(next);
64403 +#endif
64404 +
64405 /*
64406 * Can it merge with the predecessor?
64407 */
64408 @@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
64409 /* cases 1, 6 */
64410 vma_adjust(prev, prev->vm_start,
64411 next->vm_end, prev->vm_pgoff, NULL);
64412 - } else /* cases 2, 5, 7 */
64413 +
64414 +#ifdef CONFIG_PAX_SEGMEXEC
64415 + if (prev_m)
64416 + vma_adjust(prev_m, prev_m->vm_start,
64417 + next_m->vm_end, prev_m->vm_pgoff, NULL);
64418 +#endif
64419 +
64420 + } else { /* cases 2, 5, 7 */
64421 vma_adjust(prev, prev->vm_start,
64422 end, prev->vm_pgoff, NULL);
64423 +
64424 +#ifdef CONFIG_PAX_SEGMEXEC
64425 + if (prev_m)
64426 + vma_adjust(prev_m, prev_m->vm_start,
64427 + end_m, prev_m->vm_pgoff, NULL);
64428 +#endif
64429 +
64430 + }
64431 return prev;
64432 }
64433
64434 @@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
64435 mpol_equal(policy, vma_policy(next)) &&
64436 can_vma_merge_before(next, vm_flags,
64437 anon_vma, file, pgoff+pglen)) {
64438 - if (prev && addr < prev->vm_end) /* case 4 */
64439 + if (prev && addr < prev->vm_end) { /* case 4 */
64440 vma_adjust(prev, prev->vm_start,
64441 addr, prev->vm_pgoff, NULL);
64442 - else /* cases 3, 8 */
64443 +
64444 +#ifdef CONFIG_PAX_SEGMEXEC
64445 + if (prev_m)
64446 + vma_adjust(prev_m, prev_m->vm_start,
64447 + addr_m, prev_m->vm_pgoff, NULL);
64448 +#endif
64449 +
64450 + } else { /* cases 3, 8 */
64451 vma_adjust(area, addr, next->vm_end,
64452 next->vm_pgoff - pglen, NULL);
64453 +
64454 +#ifdef CONFIG_PAX_SEGMEXEC
64455 + if (area_m)
64456 + vma_adjust(area_m, addr_m, next_m->vm_end,
64457 + next_m->vm_pgoff - pglen, NULL);
64458 +#endif
64459 +
64460 + }
64461 return area;
64462 }
64463
64464 @@ -898,14 +978,11 @@ none:
64465 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
64466 struct file *file, long pages)
64467 {
64468 - const unsigned long stack_flags
64469 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
64470 -
64471 if (file) {
64472 mm->shared_vm += pages;
64473 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
64474 mm->exec_vm += pages;
64475 - } else if (flags & stack_flags)
64476 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
64477 mm->stack_vm += pages;
64478 if (flags & (VM_RESERVED|VM_IO))
64479 mm->reserved_vm += pages;
64480 @@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
64481 * (the exception is when the underlying filesystem is noexec
64482 * mounted, in which case we dont add PROT_EXEC.)
64483 */
64484 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
64485 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
64486 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
64487 prot |= PROT_EXEC;
64488
64489 @@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
64490 /* Obtain the address to map to. we verify (or select) it and ensure
64491 * that it represents a valid section of the address space.
64492 */
64493 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
64494 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
64495 if (addr & ~PAGE_MASK)
64496 return addr;
64497
64498 @@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
64499 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
64500 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
64501
64502 +#ifdef CONFIG_PAX_MPROTECT
64503 + if (mm->pax_flags & MF_PAX_MPROTECT) {
64504 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
64505 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
64506 + gr_log_rwxmmap(file);
64507 +
64508 +#ifdef CONFIG_PAX_EMUPLT
64509 + vm_flags &= ~VM_EXEC;
64510 +#else
64511 + return -EPERM;
64512 +#endif
64513 +
64514 + }
64515 +
64516 + if (!(vm_flags & VM_EXEC))
64517 + vm_flags &= ~VM_MAYEXEC;
64518 +#else
64519 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
64520 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
64521 +#endif
64522 + else
64523 + vm_flags &= ~VM_MAYWRITE;
64524 + }
64525 +#endif
64526 +
64527 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64528 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
64529 + vm_flags &= ~VM_PAGEEXEC;
64530 +#endif
64531 +
64532 if (flags & MAP_LOCKED)
64533 if (!can_do_mlock())
64534 return -EPERM;
64535 @@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
64536 locked += mm->locked_vm;
64537 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
64538 lock_limit >>= PAGE_SHIFT;
64539 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
64540 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
64541 return -EAGAIN;
64542 }
64543 @@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
64544 if (error)
64545 return error;
64546
64547 + if (!gr_acl_handle_mmap(file, prot))
64548 + return -EACCES;
64549 +
64550 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
64551 }
64552 EXPORT_SYMBOL(do_mmap_pgoff);
64553 @@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
64554 */
64555 int vma_wants_writenotify(struct vm_area_struct *vma)
64556 {
64557 - unsigned int vm_flags = vma->vm_flags;
64558 + unsigned long vm_flags = vma->vm_flags;
64559
64560 /* If it was private or non-writable, the write bit is already clear */
64561 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
64562 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
64563 return 0;
64564
64565 /* The backer wishes to know when pages are first written to? */
64566 @@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
64567 unsigned long charged = 0;
64568 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
64569
64570 +#ifdef CONFIG_PAX_SEGMEXEC
64571 + struct vm_area_struct *vma_m = NULL;
64572 +#endif
64573 +
64574 + /*
64575 + * mm->mmap_sem is required to protect against another thread
64576 + * changing the mappings in case we sleep.
64577 + */
64578 + verify_mm_writelocked(mm);
64579 +
64580 /* Clear old maps */
64581 error = -ENOMEM;
64582 -munmap_back:
64583 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64584 if (vma && vma->vm_start < addr + len) {
64585 if (do_munmap(mm, addr, len))
64586 return -ENOMEM;
64587 - goto munmap_back;
64588 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
64589 + BUG_ON(vma && vma->vm_start < addr + len);
64590 }
64591
64592 /* Check against address space limit. */
64593 @@ -1173,6 +1294,16 @@ munmap_back:
64594 goto unacct_error;
64595 }
64596
64597 +#ifdef CONFIG_PAX_SEGMEXEC
64598 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
64599 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
64600 + if (!vma_m) {
64601 + error = -ENOMEM;
64602 + goto free_vma;
64603 + }
64604 + }
64605 +#endif
64606 +
64607 vma->vm_mm = mm;
64608 vma->vm_start = addr;
64609 vma->vm_end = addr + len;
64610 @@ -1195,6 +1326,19 @@ munmap_back:
64611 error = file->f_op->mmap(file, vma);
64612 if (error)
64613 goto unmap_and_free_vma;
64614 +
64615 +#ifdef CONFIG_PAX_SEGMEXEC
64616 + if (vma_m && (vm_flags & VM_EXECUTABLE))
64617 + added_exe_file_vma(mm);
64618 +#endif
64619 +
64620 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
64621 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
64622 + vma->vm_flags |= VM_PAGEEXEC;
64623 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
64624 + }
64625 +#endif
64626 +
64627 if (vm_flags & VM_EXECUTABLE)
64628 added_exe_file_vma(mm);
64629
64630 @@ -1218,6 +1362,11 @@ munmap_back:
64631 vma_link(mm, vma, prev, rb_link, rb_parent);
64632 file = vma->vm_file;
64633
64634 +#ifdef CONFIG_PAX_SEGMEXEC
64635 + if (vma_m)
64636 + pax_mirror_vma(vma_m, vma);
64637 +#endif
64638 +
64639 /* Once vma denies write, undo our temporary denial count */
64640 if (correct_wcount)
64641 atomic_inc(&inode->i_writecount);
64642 @@ -1226,6 +1375,7 @@ out:
64643
64644 mm->total_vm += len >> PAGE_SHIFT;
64645 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
64646 + track_exec_limit(mm, addr, addr + len, vm_flags);
64647 if (vm_flags & VM_LOCKED) {
64648 /*
64649 * makes pages present; downgrades, drops, reacquires mmap_sem
64650 @@ -1248,6 +1398,12 @@ unmap_and_free_vma:
64651 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
64652 charged = 0;
64653 free_vma:
64654 +
64655 +#ifdef CONFIG_PAX_SEGMEXEC
64656 + if (vma_m)
64657 + kmem_cache_free(vm_area_cachep, vma_m);
64658 +#endif
64659 +
64660 kmem_cache_free(vm_area_cachep, vma);
64661 unacct_error:
64662 if (charged)
64663 @@ -1255,6 +1411,44 @@ unacct_error:
64664 return error;
64665 }
64666
64667 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
64668 +{
64669 + if (!vma) {
64670 +#ifdef CONFIG_STACK_GROWSUP
64671 + if (addr > sysctl_heap_stack_gap)
64672 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
64673 + else
64674 + vma = find_vma(current->mm, 0);
64675 + if (vma && (vma->vm_flags & VM_GROWSUP))
64676 + return false;
64677 +#endif
64678 + return true;
64679 + }
64680 +
64681 + if (addr + len > vma->vm_start)
64682 + return false;
64683 +
64684 + if (vma->vm_flags & VM_GROWSDOWN)
64685 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
64686 +#ifdef CONFIG_STACK_GROWSUP
64687 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
64688 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
64689 +#endif
64690 +
64691 + return true;
64692 +}
64693 +
64694 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
64695 +{
64696 + if (vma->vm_start < len)
64697 + return -ENOMEM;
64698 + if (!(vma->vm_flags & VM_GROWSDOWN))
64699 + return vma->vm_start - len;
64700 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
64701 + return vma->vm_start - len - sysctl_heap_stack_gap;
64702 + return -ENOMEM;
64703 +}
64704 +
64705 /* Get an address range which is currently unmapped.
64706 * For shmat() with addr=0.
64707 *
64708 @@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
64709 if (flags & MAP_FIXED)
64710 return addr;
64711
64712 +#ifdef CONFIG_PAX_RANDMMAP
64713 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64714 +#endif
64715 +
64716 if (addr) {
64717 addr = PAGE_ALIGN(addr);
64718 - vma = find_vma(mm, addr);
64719 - if (TASK_SIZE - len >= addr &&
64720 - (!vma || addr + len <= vma->vm_start))
64721 - return addr;
64722 + if (TASK_SIZE - len >= addr) {
64723 + vma = find_vma(mm, addr);
64724 + if (check_heap_stack_gap(vma, addr, len))
64725 + return addr;
64726 + }
64727 }
64728 if (len > mm->cached_hole_size) {
64729 - start_addr = addr = mm->free_area_cache;
64730 + start_addr = addr = mm->free_area_cache;
64731 } else {
64732 - start_addr = addr = TASK_UNMAPPED_BASE;
64733 - mm->cached_hole_size = 0;
64734 + start_addr = addr = mm->mmap_base;
64735 + mm->cached_hole_size = 0;
64736 }
64737
64738 full_search:
64739 @@ -1303,34 +1502,40 @@ full_search:
64740 * Start a new search - just in case we missed
64741 * some holes.
64742 */
64743 - if (start_addr != TASK_UNMAPPED_BASE) {
64744 - addr = TASK_UNMAPPED_BASE;
64745 - start_addr = addr;
64746 + if (start_addr != mm->mmap_base) {
64747 + start_addr = addr = mm->mmap_base;
64748 mm->cached_hole_size = 0;
64749 goto full_search;
64750 }
64751 return -ENOMEM;
64752 }
64753 - if (!vma || addr + len <= vma->vm_start) {
64754 - /*
64755 - * Remember the place where we stopped the search:
64756 - */
64757 - mm->free_area_cache = addr + len;
64758 - return addr;
64759 - }
64760 + if (check_heap_stack_gap(vma, addr, len))
64761 + break;
64762 if (addr + mm->cached_hole_size < vma->vm_start)
64763 mm->cached_hole_size = vma->vm_start - addr;
64764 addr = vma->vm_end;
64765 }
64766 +
64767 + /*
64768 + * Remember the place where we stopped the search:
64769 + */
64770 + mm->free_area_cache = addr + len;
64771 + return addr;
64772 }
64773 #endif
64774
64775 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
64776 {
64777 +
64778 +#ifdef CONFIG_PAX_SEGMEXEC
64779 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64780 + return;
64781 +#endif
64782 +
64783 /*
64784 * Is this a new hole at the lowest possible address?
64785 */
64786 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
64787 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
64788 mm->free_area_cache = addr;
64789 mm->cached_hole_size = ~0UL;
64790 }
64791 @@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
64792 {
64793 struct vm_area_struct *vma;
64794 struct mm_struct *mm = current->mm;
64795 - unsigned long addr = addr0;
64796 + unsigned long base = mm->mmap_base, addr = addr0;
64797
64798 /* requested length too big for entire address space */
64799 if (len > TASK_SIZE)
64800 @@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
64801 if (flags & MAP_FIXED)
64802 return addr;
64803
64804 +#ifdef CONFIG_PAX_RANDMMAP
64805 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
64806 +#endif
64807 +
64808 /* requesting a specific address */
64809 if (addr) {
64810 addr = PAGE_ALIGN(addr);
64811 - vma = find_vma(mm, addr);
64812 - if (TASK_SIZE - len >= addr &&
64813 - (!vma || addr + len <= vma->vm_start))
64814 - return addr;
64815 + if (TASK_SIZE - len >= addr) {
64816 + vma = find_vma(mm, addr);
64817 + if (check_heap_stack_gap(vma, addr, len))
64818 + return addr;
64819 + }
64820 }
64821
64822 /* check if free_area_cache is useful for us */
64823 @@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
64824 /* make sure it can fit in the remaining address space */
64825 if (addr > len) {
64826 vma = find_vma(mm, addr-len);
64827 - if (!vma || addr <= vma->vm_start)
64828 + if (check_heap_stack_gap(vma, addr - len, len))
64829 /* remember the address as a hint for next time */
64830 return (mm->free_area_cache = addr-len);
64831 }
64832 @@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
64833 * return with success:
64834 */
64835 vma = find_vma(mm, addr);
64836 - if (!vma || addr+len <= vma->vm_start)
64837 + if (check_heap_stack_gap(vma, addr, len))
64838 /* remember the address as a hint for next time */
64839 return (mm->free_area_cache = addr);
64840
64841 @@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
64842 mm->cached_hole_size = vma->vm_start - addr;
64843
64844 /* try just below the current vma->vm_start */
64845 - addr = vma->vm_start-len;
64846 - } while (len < vma->vm_start);
64847 + addr = skip_heap_stack_gap(vma, len);
64848 + } while (!IS_ERR_VALUE(addr));
64849
64850 bottomup:
64851 /*
64852 @@ -1414,13 +1624,21 @@ bottomup:
64853 * can happen with large stack limits and large mmap()
64854 * allocations.
64855 */
64856 + mm->mmap_base = TASK_UNMAPPED_BASE;
64857 +
64858 +#ifdef CONFIG_PAX_RANDMMAP
64859 + if (mm->pax_flags & MF_PAX_RANDMMAP)
64860 + mm->mmap_base += mm->delta_mmap;
64861 +#endif
64862 +
64863 + mm->free_area_cache = mm->mmap_base;
64864 mm->cached_hole_size = ~0UL;
64865 - mm->free_area_cache = TASK_UNMAPPED_BASE;
64866 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
64867 /*
64868 * Restore the topdown base:
64869 */
64870 - mm->free_area_cache = mm->mmap_base;
64871 + mm->mmap_base = base;
64872 + mm->free_area_cache = base;
64873 mm->cached_hole_size = ~0UL;
64874
64875 return addr;
64876 @@ -1429,6 +1647,12 @@ bottomup:
64877
64878 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
64879 {
64880 +
64881 +#ifdef CONFIG_PAX_SEGMEXEC
64882 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
64883 + return;
64884 +#endif
64885 +
64886 /*
64887 * Is this a new hole at the highest possible address?
64888 */
64889 @@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
64890 mm->free_area_cache = addr;
64891
64892 /* dont allow allocations above current base */
64893 - if (mm->free_area_cache > mm->mmap_base)
64894 + if (mm->free_area_cache > mm->mmap_base) {
64895 mm->free_area_cache = mm->mmap_base;
64896 + mm->cached_hole_size = ~0UL;
64897 + }
64898 }
64899
64900 unsigned long
64901 @@ -1545,6 +1771,27 @@ out:
64902 return prev ? prev->vm_next : vma;
64903 }
64904
64905 +#ifdef CONFIG_PAX_SEGMEXEC
64906 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
64907 +{
64908 + struct vm_area_struct *vma_m;
64909 +
64910 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
64911 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
64912 + BUG_ON(vma->vm_mirror);
64913 + return NULL;
64914 + }
64915 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
64916 + vma_m = vma->vm_mirror;
64917 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
64918 + BUG_ON(vma->vm_file != vma_m->vm_file);
64919 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
64920 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
64921 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
64922 + return vma_m;
64923 +}
64924 +#endif
64925 +
64926 /*
64927 * Verify that the stack growth is acceptable and
64928 * update accounting. This is shared with both the
64929 @@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
64930 return -ENOMEM;
64931
64932 /* Stack limit test */
64933 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
64934 if (size > rlim[RLIMIT_STACK].rlim_cur)
64935 return -ENOMEM;
64936
64937 @@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
64938 unsigned long limit;
64939 locked = mm->locked_vm + grow;
64940 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
64941 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
64942 if (locked > limit && !capable(CAP_IPC_LOCK))
64943 return -ENOMEM;
64944 }
64945 @@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
64946 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
64947 * vma is the last one with address > vma->vm_end. Have to extend vma.
64948 */
64949 +#ifndef CONFIG_IA64
64950 +static
64951 +#endif
64952 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
64953 {
64954 int error;
64955 + bool locknext;
64956
64957 if (!(vma->vm_flags & VM_GROWSUP))
64958 return -EFAULT;
64959
64960 + /* Also guard against wrapping around to address 0. */
64961 + if (address < PAGE_ALIGN(address+1))
64962 + address = PAGE_ALIGN(address+1);
64963 + else
64964 + return -ENOMEM;
64965 +
64966 /*
64967 * We must make sure the anon_vma is allocated
64968 * so that the anon_vma locking is not a noop.
64969 */
64970 if (unlikely(anon_vma_prepare(vma)))
64971 return -ENOMEM;
64972 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
64973 + if (locknext && anon_vma_prepare(vma->vm_next))
64974 + return -ENOMEM;
64975 anon_vma_lock(vma);
64976 + if (locknext)
64977 + anon_vma_lock(vma->vm_next);
64978
64979 /*
64980 * vma->vm_start/vm_end cannot change under us because the caller
64981 * is required to hold the mmap_sem in read mode. We need the
64982 - * anon_vma lock to serialize against concurrent expand_stacks.
64983 - * Also guard against wrapping around to address 0.
64984 + * anon_vma locks to serialize against concurrent expand_stacks
64985 + * and expand_upwards.
64986 */
64987 - if (address < PAGE_ALIGN(address+4))
64988 - address = PAGE_ALIGN(address+4);
64989 - else {
64990 - anon_vma_unlock(vma);
64991 - return -ENOMEM;
64992 - }
64993 error = 0;
64994
64995 /* Somebody else might have raced and expanded it already */
64996 - if (address > vma->vm_end) {
64997 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
64998 + error = -ENOMEM;
64999 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
65000 unsigned long size, grow;
65001
65002 size = address - vma->vm_start;
65003 @@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
65004 if (!error)
65005 vma->vm_end = address;
65006 }
65007 + if (locknext)
65008 + anon_vma_unlock(vma->vm_next);
65009 anon_vma_unlock(vma);
65010 return error;
65011 }
65012 @@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
65013 unsigned long address)
65014 {
65015 int error;
65016 + bool lockprev = false;
65017 + struct vm_area_struct *prev;
65018
65019 /*
65020 * We must make sure the anon_vma is allocated
65021 @@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
65022 if (error)
65023 return error;
65024
65025 + prev = vma->vm_prev;
65026 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
65027 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
65028 +#endif
65029 + if (lockprev && anon_vma_prepare(prev))
65030 + return -ENOMEM;
65031 + if (lockprev)
65032 + anon_vma_lock(prev);
65033 +
65034 anon_vma_lock(vma);
65035
65036 /*
65037 @@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
65038 */
65039
65040 /* Somebody else might have raced and expanded it already */
65041 - if (address < vma->vm_start) {
65042 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
65043 + error = -ENOMEM;
65044 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
65045 unsigned long size, grow;
65046
65047 +#ifdef CONFIG_PAX_SEGMEXEC
65048 + struct vm_area_struct *vma_m;
65049 +
65050 + vma_m = pax_find_mirror_vma(vma);
65051 +#endif
65052 +
65053 size = vma->vm_end - address;
65054 grow = (vma->vm_start - address) >> PAGE_SHIFT;
65055
65056 @@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
65057 if (!error) {
65058 vma->vm_start = address;
65059 vma->vm_pgoff -= grow;
65060 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
65061 +
65062 +#ifdef CONFIG_PAX_SEGMEXEC
65063 + if (vma_m) {
65064 + vma_m->vm_start -= grow << PAGE_SHIFT;
65065 + vma_m->vm_pgoff -= grow;
65066 + }
65067 +#endif
65068 +
65069 }
65070 }
65071 anon_vma_unlock(vma);
65072 + if (lockprev)
65073 + anon_vma_unlock(prev);
65074 return error;
65075 }
65076
65077 @@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
65078 do {
65079 long nrpages = vma_pages(vma);
65080
65081 +#ifdef CONFIG_PAX_SEGMEXEC
65082 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
65083 + vma = remove_vma(vma);
65084 + continue;
65085 + }
65086 +#endif
65087 +
65088 mm->total_vm -= nrpages;
65089 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
65090 vma = remove_vma(vma);
65091 @@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
65092 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
65093 vma->vm_prev = NULL;
65094 do {
65095 +
65096 +#ifdef CONFIG_PAX_SEGMEXEC
65097 + if (vma->vm_mirror) {
65098 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
65099 + vma->vm_mirror->vm_mirror = NULL;
65100 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
65101 + vma->vm_mirror = NULL;
65102 + }
65103 +#endif
65104 +
65105 rb_erase(&vma->vm_rb, &mm->mm_rb);
65106 mm->map_count--;
65107 tail_vma = vma;
65108 @@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
65109 struct mempolicy *pol;
65110 struct vm_area_struct *new;
65111
65112 +#ifdef CONFIG_PAX_SEGMEXEC
65113 + struct vm_area_struct *vma_m, *new_m = NULL;
65114 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
65115 +#endif
65116 +
65117 if (is_vm_hugetlb_page(vma) && (addr &
65118 ~(huge_page_mask(hstate_vma(vma)))))
65119 return -EINVAL;
65120
65121 +#ifdef CONFIG_PAX_SEGMEXEC
65122 + vma_m = pax_find_mirror_vma(vma);
65123 +
65124 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
65125 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
65126 + if (mm->map_count >= sysctl_max_map_count-1)
65127 + return -ENOMEM;
65128 + } else
65129 +#endif
65130 +
65131 if (mm->map_count >= sysctl_max_map_count)
65132 return -ENOMEM;
65133
65134 @@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
65135 if (!new)
65136 return -ENOMEM;
65137
65138 +#ifdef CONFIG_PAX_SEGMEXEC
65139 + if (vma_m) {
65140 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65141 + if (!new_m) {
65142 + kmem_cache_free(vm_area_cachep, new);
65143 + return -ENOMEM;
65144 + }
65145 + }
65146 +#endif
65147 +
65148 /* most fields are the same, copy all, and then fixup */
65149 *new = *vma;
65150
65151 @@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
65152 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
65153 }
65154
65155 +#ifdef CONFIG_PAX_SEGMEXEC
65156 + if (vma_m) {
65157 + *new_m = *vma_m;
65158 + new_m->vm_mirror = new;
65159 + new->vm_mirror = new_m;
65160 +
65161 + if (new_below)
65162 + new_m->vm_end = addr_m;
65163 + else {
65164 + new_m->vm_start = addr_m;
65165 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
65166 + }
65167 + }
65168 +#endif
65169 +
65170 pol = mpol_dup(vma_policy(vma));
65171 if (IS_ERR(pol)) {
65172 +
65173 +#ifdef CONFIG_PAX_SEGMEXEC
65174 + if (new_m)
65175 + kmem_cache_free(vm_area_cachep, new_m);
65176 +#endif
65177 +
65178 kmem_cache_free(vm_area_cachep, new);
65179 return PTR_ERR(pol);
65180 }
65181 @@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
65182 else
65183 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
65184
65185 +#ifdef CONFIG_PAX_SEGMEXEC
65186 + if (vma_m) {
65187 + mpol_get(pol);
65188 + vma_set_policy(new_m, pol);
65189 +
65190 + if (new_m->vm_file) {
65191 + get_file(new_m->vm_file);
65192 + if (vma_m->vm_flags & VM_EXECUTABLE)
65193 + added_exe_file_vma(mm);
65194 + }
65195 +
65196 + if (new_m->vm_ops && new_m->vm_ops->open)
65197 + new_m->vm_ops->open(new_m);
65198 +
65199 + if (new_below)
65200 + vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
65201 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
65202 + else
65203 + vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
65204 + }
65205 +#endif
65206 +
65207 return 0;
65208 }
65209
65210 @@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
65211 * work. This now handles partial unmappings.
65212 * Jeremy Fitzhardinge <jeremy@goop.org>
65213 */
65214 +#ifdef CONFIG_PAX_SEGMEXEC
65215 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65216 +{
65217 + int ret = __do_munmap(mm, start, len);
65218 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
65219 + return ret;
65220 +
65221 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
65222 +}
65223 +
65224 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65225 +#else
65226 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
65227 +#endif
65228 {
65229 unsigned long end;
65230 struct vm_area_struct *vma, *prev, *last;
65231
65232 + /*
65233 + * mm->mmap_sem is required to protect against another thread
65234 + * changing the mappings in case we sleep.
65235 + */
65236 + verify_mm_writelocked(mm);
65237 +
65238 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
65239 return -EINVAL;
65240
65241 @@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
65242 /* Fix up all other VM information */
65243 remove_vma_list(mm, vma);
65244
65245 + track_exec_limit(mm, start, end, 0UL);
65246 +
65247 return 0;
65248 }
65249
65250 @@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
65251
65252 profile_munmap(addr);
65253
65254 +#ifdef CONFIG_PAX_SEGMEXEC
65255 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
65256 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
65257 + return -EINVAL;
65258 +#endif
65259 +
65260 down_write(&mm->mmap_sem);
65261 ret = do_munmap(mm, addr, len);
65262 up_write(&mm->mmap_sem);
65263 return ret;
65264 }
65265
65266 -static inline void verify_mm_writelocked(struct mm_struct *mm)
65267 -{
65268 -#ifdef CONFIG_DEBUG_VM
65269 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
65270 - WARN_ON(1);
65271 - up_read(&mm->mmap_sem);
65272 - }
65273 -#endif
65274 -}
65275 -
65276 /*
65277 * this is really a simplified "do_mmap". it only handles
65278 * anonymous maps. eventually we may be able to do some
65279 @@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
65280 struct rb_node ** rb_link, * rb_parent;
65281 pgoff_t pgoff = addr >> PAGE_SHIFT;
65282 int error;
65283 + unsigned long charged;
65284
65285 len = PAGE_ALIGN(len);
65286 if (!len)
65287 @@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
65288
65289 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
65290
65291 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
65292 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
65293 + flags &= ~VM_EXEC;
65294 +
65295 +#ifdef CONFIG_PAX_MPROTECT
65296 + if (mm->pax_flags & MF_PAX_MPROTECT)
65297 + flags &= ~VM_MAYEXEC;
65298 +#endif
65299 +
65300 + }
65301 +#endif
65302 +
65303 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
65304 if (error & ~PAGE_MASK)
65305 return error;
65306
65307 + charged = len >> PAGE_SHIFT;
65308 +
65309 /*
65310 * mlock MCL_FUTURE?
65311 */
65312 if (mm->def_flags & VM_LOCKED) {
65313 unsigned long locked, lock_limit;
65314 - locked = len >> PAGE_SHIFT;
65315 + locked = charged;
65316 locked += mm->locked_vm;
65317 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
65318 lock_limit >>= PAGE_SHIFT;
65319 @@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
65320 /*
65321 * Clear old maps. this also does some error checking for us
65322 */
65323 - munmap_back:
65324 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65325 if (vma && vma->vm_start < addr + len) {
65326 if (do_munmap(mm, addr, len))
65327 return -ENOMEM;
65328 - goto munmap_back;
65329 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65330 + BUG_ON(vma && vma->vm_start < addr + len);
65331 }
65332
65333 /* Check against address space limits *after* clearing old maps... */
65334 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
65335 + if (!may_expand_vm(mm, charged))
65336 return -ENOMEM;
65337
65338 if (mm->map_count > sysctl_max_map_count)
65339 return -ENOMEM;
65340
65341 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
65342 + if (security_vm_enough_memory(charged))
65343 return -ENOMEM;
65344
65345 /* Can we just expand an old private anonymous mapping? */
65346 @@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
65347 */
65348 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65349 if (!vma) {
65350 - vm_unacct_memory(len >> PAGE_SHIFT);
65351 + vm_unacct_memory(charged);
65352 return -ENOMEM;
65353 }
65354
65355 @@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
65356 vma->vm_page_prot = vm_get_page_prot(flags);
65357 vma_link(mm, vma, prev, rb_link, rb_parent);
65358 out:
65359 - mm->total_vm += len >> PAGE_SHIFT;
65360 + mm->total_vm += charged;
65361 if (flags & VM_LOCKED) {
65362 if (!mlock_vma_pages_range(vma, addr, addr + len))
65363 - mm->locked_vm += (len >> PAGE_SHIFT);
65364 + mm->locked_vm += charged;
65365 }
65366 + track_exec_limit(mm, addr, addr + len, flags);
65367 return addr;
65368 }
65369
65370 @@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
65371 * Walk the list again, actually closing and freeing it,
65372 * with preemption enabled, without holding any MM locks.
65373 */
65374 - while (vma)
65375 + while (vma) {
65376 + vma->vm_mirror = NULL;
65377 vma = remove_vma(vma);
65378 + }
65379
65380 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
65381 }
65382 @@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
65383 struct vm_area_struct * __vma, * prev;
65384 struct rb_node ** rb_link, * rb_parent;
65385
65386 +#ifdef CONFIG_PAX_SEGMEXEC
65387 + struct vm_area_struct *vma_m = NULL;
65388 +#endif
65389 +
65390 /*
65391 * The vm_pgoff of a purely anonymous vma should be irrelevant
65392 * until its first write fault, when page's anon_vma and index
65393 @@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
65394 if ((vma->vm_flags & VM_ACCOUNT) &&
65395 security_vm_enough_memory_mm(mm, vma_pages(vma)))
65396 return -ENOMEM;
65397 +
65398 +#ifdef CONFIG_PAX_SEGMEXEC
65399 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
65400 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65401 + if (!vma_m)
65402 + return -ENOMEM;
65403 + }
65404 +#endif
65405 +
65406 vma_link(mm, vma, prev, rb_link, rb_parent);
65407 +
65408 +#ifdef CONFIG_PAX_SEGMEXEC
65409 + if (vma_m)
65410 + pax_mirror_vma(vma_m, vma);
65411 +#endif
65412 +
65413 return 0;
65414 }
65415
65416 @@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
65417 struct rb_node **rb_link, *rb_parent;
65418 struct mempolicy *pol;
65419
65420 + BUG_ON(vma->vm_mirror);
65421 +
65422 /*
65423 * If anonymous vma has not yet been faulted, update new pgoff
65424 * to match new location, to increase its chance of merging.
65425 @@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
65426 return new_vma;
65427 }
65428
65429 +#ifdef CONFIG_PAX_SEGMEXEC
65430 +void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
65431 +{
65432 + struct vm_area_struct *prev_m;
65433 + struct rb_node **rb_link_m, *rb_parent_m;
65434 + struct mempolicy *pol_m;
65435 +
65436 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
65437 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
65438 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
65439 + *vma_m = *vma;
65440 + pol_m = vma_policy(vma_m);
65441 + mpol_get(pol_m);
65442 + vma_set_policy(vma_m, pol_m);
65443 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
65444 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
65445 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
65446 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
65447 + if (vma_m->vm_file)
65448 + get_file(vma_m->vm_file);
65449 + if (vma_m->vm_ops && vma_m->vm_ops->open)
65450 + vma_m->vm_ops->open(vma_m);
65451 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
65452 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
65453 + vma_m->vm_mirror = vma;
65454 + vma->vm_mirror = vma_m;
65455 +}
65456 +#endif
65457 +
65458 /*
65459 * Return true if the calling process may expand its vm space by the passed
65460 * number of pages
65461 @@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
65462 unsigned long lim;
65463
65464 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
65465 -
65466 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
65467 if (cur + npages > lim)
65468 return 0;
65469 return 1;
65470 @@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
65471 vma->vm_start = addr;
65472 vma->vm_end = addr + len;
65473
65474 +#ifdef CONFIG_PAX_MPROTECT
65475 + if (mm->pax_flags & MF_PAX_MPROTECT) {
65476 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
65477 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
65478 + return -EPERM;
65479 + if (!(vm_flags & VM_EXEC))
65480 + vm_flags &= ~VM_MAYEXEC;
65481 +#else
65482 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
65483 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
65484 +#endif
65485 + else
65486 + vm_flags &= ~VM_MAYWRITE;
65487 + }
65488 +#endif
65489 +
65490 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
65491 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65492
65493 diff -urNp linux-2.6.32.42/mm/mprotect.c linux-2.6.32.42/mm/mprotect.c
65494 --- linux-2.6.32.42/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
65495 +++ linux-2.6.32.42/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
65496 @@ -24,10 +24,16 @@
65497 #include <linux/mmu_notifier.h>
65498 #include <linux/migrate.h>
65499 #include <linux/perf_event.h>
65500 +
65501 +#ifdef CONFIG_PAX_MPROTECT
65502 +#include <linux/elf.h>
65503 +#endif
65504 +
65505 #include <asm/uaccess.h>
65506 #include <asm/pgtable.h>
65507 #include <asm/cacheflush.h>
65508 #include <asm/tlbflush.h>
65509 +#include <asm/mmu_context.h>
65510
65511 #ifndef pgprot_modify
65512 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
65513 @@ -132,6 +138,48 @@ static void change_protection(struct vm_
65514 flush_tlb_range(vma, start, end);
65515 }
65516
65517 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65518 +/* called while holding the mmap semaphor for writing except stack expansion */
65519 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
65520 +{
65521 + unsigned long oldlimit, newlimit = 0UL;
65522 +
65523 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
65524 + return;
65525 +
65526 + spin_lock(&mm->page_table_lock);
65527 + oldlimit = mm->context.user_cs_limit;
65528 + if ((prot & VM_EXEC) && oldlimit < end)
65529 + /* USER_CS limit moved up */
65530 + newlimit = end;
65531 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
65532 + /* USER_CS limit moved down */
65533 + newlimit = start;
65534 +
65535 + if (newlimit) {
65536 + mm->context.user_cs_limit = newlimit;
65537 +
65538 +#ifdef CONFIG_SMP
65539 + wmb();
65540 + cpus_clear(mm->context.cpu_user_cs_mask);
65541 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
65542 +#endif
65543 +
65544 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
65545 + }
65546 + spin_unlock(&mm->page_table_lock);
65547 + if (newlimit == end) {
65548 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
65549 +
65550 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
65551 + if (is_vm_hugetlb_page(vma))
65552 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
65553 + else
65554 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
65555 + }
65556 +}
65557 +#endif
65558 +
65559 int
65560 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
65561 unsigned long start, unsigned long end, unsigned long newflags)
65562 @@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
65563 int error;
65564 int dirty_accountable = 0;
65565
65566 +#ifdef CONFIG_PAX_SEGMEXEC
65567 + struct vm_area_struct *vma_m = NULL;
65568 + unsigned long start_m, end_m;
65569 +
65570 + start_m = start + SEGMEXEC_TASK_SIZE;
65571 + end_m = end + SEGMEXEC_TASK_SIZE;
65572 +#endif
65573 +
65574 if (newflags == oldflags) {
65575 *pprev = vma;
65576 return 0;
65577 }
65578
65579 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
65580 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
65581 +
65582 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
65583 + return -ENOMEM;
65584 +
65585 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
65586 + return -ENOMEM;
65587 + }
65588 +
65589 /*
65590 * If we make a private mapping writable we increase our commit;
65591 * but (without finer accounting) cannot reduce our commit if we
65592 @@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
65593 }
65594 }
65595
65596 +#ifdef CONFIG_PAX_SEGMEXEC
65597 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
65598 + if (start != vma->vm_start) {
65599 + error = split_vma(mm, vma, start, 1);
65600 + if (error)
65601 + goto fail;
65602 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
65603 + *pprev = (*pprev)->vm_next;
65604 + }
65605 +
65606 + if (end != vma->vm_end) {
65607 + error = split_vma(mm, vma, end, 0);
65608 + if (error)
65609 + goto fail;
65610 + }
65611 +
65612 + if (pax_find_mirror_vma(vma)) {
65613 + error = __do_munmap(mm, start_m, end_m - start_m);
65614 + if (error)
65615 + goto fail;
65616 + } else {
65617 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65618 + if (!vma_m) {
65619 + error = -ENOMEM;
65620 + goto fail;
65621 + }
65622 + vma->vm_flags = newflags;
65623 + pax_mirror_vma(vma_m, vma);
65624 + }
65625 + }
65626 +#endif
65627 +
65628 /*
65629 * First try to merge with previous and/or next vma.
65630 */
65631 @@ -195,9 +293,21 @@ success:
65632 * vm_flags and vm_page_prot are protected by the mmap_sem
65633 * held in write mode.
65634 */
65635 +
65636 +#ifdef CONFIG_PAX_SEGMEXEC
65637 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
65638 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
65639 +#endif
65640 +
65641 vma->vm_flags = newflags;
65642 +
65643 +#ifdef CONFIG_PAX_MPROTECT
65644 + if (mm->binfmt && mm->binfmt->handle_mprotect)
65645 + mm->binfmt->handle_mprotect(vma, newflags);
65646 +#endif
65647 +
65648 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
65649 - vm_get_page_prot(newflags));
65650 + vm_get_page_prot(vma->vm_flags));
65651
65652 if (vma_wants_writenotify(vma)) {
65653 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
65654 @@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65655 end = start + len;
65656 if (end <= start)
65657 return -ENOMEM;
65658 +
65659 +#ifdef CONFIG_PAX_SEGMEXEC
65660 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
65661 + if (end > SEGMEXEC_TASK_SIZE)
65662 + return -EINVAL;
65663 + } else
65664 +#endif
65665 +
65666 + if (end > TASK_SIZE)
65667 + return -EINVAL;
65668 +
65669 if (!arch_validate_prot(prot))
65670 return -EINVAL;
65671
65672 @@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65673 /*
65674 * Does the application expect PROT_READ to imply PROT_EXEC:
65675 */
65676 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
65677 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
65678 prot |= PROT_EXEC;
65679
65680 vm_flags = calc_vm_prot_bits(prot);
65681 @@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65682 if (start > vma->vm_start)
65683 prev = vma;
65684
65685 +#ifdef CONFIG_PAX_MPROTECT
65686 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
65687 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
65688 +#endif
65689 +
65690 for (nstart = start ; ; ) {
65691 unsigned long newflags;
65692
65693 @@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65694
65695 /* newflags >> 4 shift VM_MAY% in place of VM_% */
65696 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
65697 + if (prot & (PROT_WRITE | PROT_EXEC))
65698 + gr_log_rwxmprotect(vma->vm_file);
65699 +
65700 + error = -EACCES;
65701 + goto out;
65702 + }
65703 +
65704 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
65705 error = -EACCES;
65706 goto out;
65707 }
65708 @@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
65709 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
65710 if (error)
65711 goto out;
65712 +
65713 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
65714 +
65715 nstart = tmp;
65716
65717 if (nstart < prev->vm_end)
65718 diff -urNp linux-2.6.32.42/mm/mremap.c linux-2.6.32.42/mm/mremap.c
65719 --- linux-2.6.32.42/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
65720 +++ linux-2.6.32.42/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
65721 @@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
65722 continue;
65723 pte = ptep_clear_flush(vma, old_addr, old_pte);
65724 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
65725 +
65726 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
65727 + if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
65728 + pte = pte_exprotect(pte);
65729 +#endif
65730 +
65731 set_pte_at(mm, new_addr, new_pte, pte);
65732 }
65733
65734 @@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
65735 if (is_vm_hugetlb_page(vma))
65736 goto Einval;
65737
65738 +#ifdef CONFIG_PAX_SEGMEXEC
65739 + if (pax_find_mirror_vma(vma))
65740 + goto Einval;
65741 +#endif
65742 +
65743 /* We can't remap across vm area boundaries */
65744 if (old_len > vma->vm_end - addr)
65745 goto Efault;
65746 @@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
65747 unsigned long ret = -EINVAL;
65748 unsigned long charged = 0;
65749 unsigned long map_flags;
65750 + unsigned long pax_task_size = TASK_SIZE;
65751
65752 if (new_addr & ~PAGE_MASK)
65753 goto out;
65754
65755 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
65756 +#ifdef CONFIG_PAX_SEGMEXEC
65757 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
65758 + pax_task_size = SEGMEXEC_TASK_SIZE;
65759 +#endif
65760 +
65761 + pax_task_size -= PAGE_SIZE;
65762 +
65763 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
65764 goto out;
65765
65766 /* Check if the location we're moving into overlaps the
65767 * old location at all, and fail if it does.
65768 */
65769 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
65770 - goto out;
65771 -
65772 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
65773 + if (addr + old_len > new_addr && new_addr + new_len > addr)
65774 goto out;
65775
65776 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65777 @@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
65778 struct vm_area_struct *vma;
65779 unsigned long ret = -EINVAL;
65780 unsigned long charged = 0;
65781 + unsigned long pax_task_size = TASK_SIZE;
65782
65783 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
65784 goto out;
65785 @@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
65786 if (!new_len)
65787 goto out;
65788
65789 +#ifdef CONFIG_PAX_SEGMEXEC
65790 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
65791 + pax_task_size = SEGMEXEC_TASK_SIZE;
65792 +#endif
65793 +
65794 + pax_task_size -= PAGE_SIZE;
65795 +
65796 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
65797 + old_len > pax_task_size || addr > pax_task_size-old_len)
65798 + goto out;
65799 +
65800 if (flags & MREMAP_FIXED) {
65801 if (flags & MREMAP_MAYMOVE)
65802 ret = mremap_to(addr, old_len, new_addr, new_len);
65803 @@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
65804 addr + new_len);
65805 }
65806 ret = addr;
65807 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
65808 goto out;
65809 }
65810 }
65811 @@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
65812 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
65813 if (ret)
65814 goto out;
65815 +
65816 + map_flags = vma->vm_flags;
65817 ret = move_vma(vma, addr, old_len, new_len, new_addr);
65818 + if (!(ret & ~PAGE_MASK)) {
65819 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
65820 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
65821 + }
65822 }
65823 out:
65824 if (ret & ~PAGE_MASK)
65825 diff -urNp linux-2.6.32.42/mm/nommu.c linux-2.6.32.42/mm/nommu.c
65826 --- linux-2.6.32.42/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
65827 +++ linux-2.6.32.42/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
65828 @@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
65829 int sysctl_overcommit_ratio = 50; /* default is 50% */
65830 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
65831 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
65832 -int heap_stack_gap = 0;
65833
65834 atomic_long_t mmap_pages_allocated;
65835
65836 @@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
65837 EXPORT_SYMBOL(find_vma);
65838
65839 /*
65840 - * find a VMA
65841 - * - we don't extend stack VMAs under NOMMU conditions
65842 - */
65843 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
65844 -{
65845 - return find_vma(mm, addr);
65846 -}
65847 -
65848 -/*
65849 * expand a stack to a given address
65850 * - not supported under NOMMU conditions
65851 */
65852 diff -urNp linux-2.6.32.42/mm/page_alloc.c linux-2.6.32.42/mm/page_alloc.c
65853 --- linux-2.6.32.42/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
65854 +++ linux-2.6.32.42/mm/page_alloc.c 2011-06-25 12:56:37.000000000 -0400
65855 @@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
65856 int bad = 0;
65857 int wasMlocked = __TestClearPageMlocked(page);
65858
65859 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
65860 + unsigned long index = 1UL << order;
65861 +#endif
65862 +
65863 kmemcheck_free_shadow(page, order);
65864
65865 for (i = 0 ; i < (1 << order) ; ++i)
65866 @@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
65867 debug_check_no_obj_freed(page_address(page),
65868 PAGE_SIZE << order);
65869 }
65870 +
65871 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
65872 + for (; index; --index)
65873 + sanitize_highpage(page + index - 1);
65874 +#endif
65875 +
65876 arch_free_page(page, order);
65877 kernel_map_pages(page, 1 << order, 0);
65878
65879 @@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
65880 arch_alloc_page(page, order);
65881 kernel_map_pages(page, 1 << order, 1);
65882
65883 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
65884 if (gfp_flags & __GFP_ZERO)
65885 prep_zero_page(page, order, gfp_flags);
65886 +#endif
65887
65888 if (order && (gfp_flags & __GFP_COMP))
65889 prep_compound_page(page, order);
65890 @@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
65891 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
65892 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
65893 }
65894 +
65895 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
65896 + sanitize_highpage(page);
65897 +#endif
65898 +
65899 arch_free_page(page, 0);
65900 kernel_map_pages(page, 1, 0);
65901
65902 @@ -2179,6 +2196,8 @@ void show_free_areas(void)
65903 int cpu;
65904 struct zone *zone;
65905
65906 + pax_track_stack();
65907 +
65908 for_each_populated_zone(zone) {
65909 show_node(zone);
65910 printk("%s per-cpu:\n", zone->name);
65911 @@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
65912 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
65913 }
65914 #else
65915 -static void inline setup_usemap(struct pglist_data *pgdat,
65916 +static inline void setup_usemap(struct pglist_data *pgdat,
65917 struct zone *zone, unsigned long zonesize) {}
65918 #endif /* CONFIG_SPARSEMEM */
65919
65920 diff -urNp linux-2.6.32.42/mm/percpu.c linux-2.6.32.42/mm/percpu.c
65921 --- linux-2.6.32.42/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
65922 +++ linux-2.6.32.42/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
65923 @@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
65924 static unsigned int pcpu_last_unit_cpu __read_mostly;
65925
65926 /* the address of the first chunk which starts with the kernel static area */
65927 -void *pcpu_base_addr __read_mostly;
65928 +void *pcpu_base_addr __read_only;
65929 EXPORT_SYMBOL_GPL(pcpu_base_addr);
65930
65931 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
65932 diff -urNp linux-2.6.32.42/mm/rmap.c linux-2.6.32.42/mm/rmap.c
65933 --- linux-2.6.32.42/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
65934 +++ linux-2.6.32.42/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
65935 @@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
65936 /* page_table_lock to protect against threads */
65937 spin_lock(&mm->page_table_lock);
65938 if (likely(!vma->anon_vma)) {
65939 +
65940 +#ifdef CONFIG_PAX_SEGMEXEC
65941 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
65942 +
65943 + if (vma_m) {
65944 + BUG_ON(vma_m->anon_vma);
65945 + vma_m->anon_vma = anon_vma;
65946 + list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
65947 + }
65948 +#endif
65949 +
65950 vma->anon_vma = anon_vma;
65951 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
65952 allocated = NULL;
65953 diff -urNp linux-2.6.32.42/mm/shmem.c linux-2.6.32.42/mm/shmem.c
65954 --- linux-2.6.32.42/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
65955 +++ linux-2.6.32.42/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
65956 @@ -31,7 +31,7 @@
65957 #include <linux/swap.h>
65958 #include <linux/ima.h>
65959
65960 -static struct vfsmount *shm_mnt;
65961 +struct vfsmount *shm_mnt;
65962
65963 #ifdef CONFIG_SHMEM
65964 /*
65965 @@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
65966 goto unlock;
65967 }
65968 entry = shmem_swp_entry(info, index, NULL);
65969 + if (!entry)
65970 + goto unlock;
65971 if (entry->val) {
65972 /*
65973 * The more uptodate page coming down from a stacked
65974 @@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
65975 struct vm_area_struct pvma;
65976 struct page *page;
65977
65978 + pax_track_stack();
65979 +
65980 spol = mpol_cond_copy(&mpol,
65981 mpol_shared_policy_lookup(&info->policy, idx));
65982
65983 @@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
65984
65985 info = SHMEM_I(inode);
65986 inode->i_size = len-1;
65987 - if (len <= (char *)inode - (char *)info) {
65988 + if (len <= (char *)inode - (char *)info && len <= 64) {
65989 /* do it inline */
65990 memcpy(info, symname, len);
65991 inode->i_op = &shmem_symlink_inline_operations;
65992 @@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
65993 int err = -ENOMEM;
65994
65995 /* Round up to L1_CACHE_BYTES to resist false sharing */
65996 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
65997 - L1_CACHE_BYTES), GFP_KERNEL);
65998 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
65999 if (!sbinfo)
66000 return -ENOMEM;
66001
66002 diff -urNp linux-2.6.32.42/mm/slab.c linux-2.6.32.42/mm/slab.c
66003 --- linux-2.6.32.42/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
66004 +++ linux-2.6.32.42/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
66005 @@ -174,7 +174,7 @@
66006
66007 /* Legal flag mask for kmem_cache_create(). */
66008 #if DEBUG
66009 -# define CREATE_MASK (SLAB_RED_ZONE | \
66010 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
66011 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
66012 SLAB_CACHE_DMA | \
66013 SLAB_STORE_USER | \
66014 @@ -182,7 +182,7 @@
66015 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66016 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
66017 #else
66018 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
66019 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
66020 SLAB_CACHE_DMA | \
66021 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
66022 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66023 @@ -308,7 +308,7 @@ struct kmem_list3 {
66024 * Need this for bootstrapping a per node allocator.
66025 */
66026 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
66027 -struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
66028 +struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
66029 #define CACHE_CACHE 0
66030 #define SIZE_AC MAX_NUMNODES
66031 #define SIZE_L3 (2 * MAX_NUMNODES)
66032 @@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
66033 if ((x)->max_freeable < i) \
66034 (x)->max_freeable = i; \
66035 } while (0)
66036 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
66037 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
66038 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
66039 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
66040 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
66041 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
66042 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
66043 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
66044 #else
66045 #define STATS_INC_ACTIVE(x) do { } while (0)
66046 #define STATS_DEC_ACTIVE(x) do { } while (0)
66047 @@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
66048 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
66049 */
66050 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
66051 - const struct slab *slab, void *obj)
66052 + const struct slab *slab, const void *obj)
66053 {
66054 u32 offset = (obj - slab->s_mem);
66055 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
66056 @@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
66057 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
66058 sizes[INDEX_AC].cs_size,
66059 ARCH_KMALLOC_MINALIGN,
66060 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66061 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66062 NULL);
66063
66064 if (INDEX_AC != INDEX_L3) {
66065 @@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
66066 kmem_cache_create(names[INDEX_L3].name,
66067 sizes[INDEX_L3].cs_size,
66068 ARCH_KMALLOC_MINALIGN,
66069 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66070 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66071 NULL);
66072 }
66073
66074 @@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
66075 sizes->cs_cachep = kmem_cache_create(names->name,
66076 sizes->cs_size,
66077 ARCH_KMALLOC_MINALIGN,
66078 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66079 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66080 NULL);
66081 }
66082 #ifdef CONFIG_ZONE_DMA
66083 @@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
66084 }
66085 /* cpu stats */
66086 {
66087 - unsigned long allochit = atomic_read(&cachep->allochit);
66088 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
66089 - unsigned long freehit = atomic_read(&cachep->freehit);
66090 - unsigned long freemiss = atomic_read(&cachep->freemiss);
66091 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
66092 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
66093 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
66094 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
66095
66096 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
66097 allochit, allocmiss, freehit, freemiss);
66098 @@ -4471,15 +4471,66 @@ static const struct file_operations proc
66099
66100 static int __init slab_proc_init(void)
66101 {
66102 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
66103 + mode_t gr_mode = S_IRUGO;
66104 +
66105 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66106 + gr_mode = S_IRUSR;
66107 +#endif
66108 +
66109 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
66110 #ifdef CONFIG_DEBUG_SLAB_LEAK
66111 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
66112 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
66113 #endif
66114 return 0;
66115 }
66116 module_init(slab_proc_init);
66117 #endif
66118
66119 +void check_object_size(const void *ptr, unsigned long n, bool to)
66120 +{
66121 +
66122 +#ifdef CONFIG_PAX_USERCOPY
66123 + struct page *page;
66124 + struct kmem_cache *cachep = NULL;
66125 + struct slab *slabp;
66126 + unsigned int objnr;
66127 + unsigned long offset;
66128 +
66129 + if (!n)
66130 + return;
66131 +
66132 + if (ZERO_OR_NULL_PTR(ptr))
66133 + goto report;
66134 +
66135 + if (!virt_addr_valid(ptr))
66136 + return;
66137 +
66138 + page = virt_to_head_page(ptr);
66139 +
66140 + if (!PageSlab(page)) {
66141 + if (object_is_on_stack(ptr, n) == -1)
66142 + goto report;
66143 + return;
66144 + }
66145 +
66146 + cachep = page_get_cache(page);
66147 + if (!(cachep->flags & SLAB_USERCOPY))
66148 + goto report;
66149 +
66150 + slabp = page_get_slab(page);
66151 + objnr = obj_to_index(cachep, slabp, ptr);
66152 + BUG_ON(objnr >= cachep->num);
66153 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
66154 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
66155 + return;
66156 +
66157 +report:
66158 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
66159 +#endif
66160 +
66161 +}
66162 +EXPORT_SYMBOL(check_object_size);
66163 +
66164 /**
66165 * ksize - get the actual amount of memory allocated for a given object
66166 * @objp: Pointer to the object
66167 diff -urNp linux-2.6.32.42/mm/slob.c linux-2.6.32.42/mm/slob.c
66168 --- linux-2.6.32.42/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
66169 +++ linux-2.6.32.42/mm/slob.c 2011-04-17 15:56:46.000000000 -0400
66170 @@ -29,7 +29,7 @@
66171 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
66172 * alloc_pages() directly, allocating compound pages so the page order
66173 * does not have to be separately tracked, and also stores the exact
66174 - * allocation size in page->private so that it can be used to accurately
66175 + * allocation size in slob_page->size so that it can be used to accurately
66176 * provide ksize(). These objects are detected in kfree() because slob_page()
66177 * is false for them.
66178 *
66179 @@ -58,6 +58,7 @@
66180 */
66181
66182 #include <linux/kernel.h>
66183 +#include <linux/sched.h>
66184 #include <linux/slab.h>
66185 #include <linux/mm.h>
66186 #include <linux/swap.h> /* struct reclaim_state */
66187 @@ -100,7 +101,8 @@ struct slob_page {
66188 unsigned long flags; /* mandatory */
66189 atomic_t _count; /* mandatory */
66190 slobidx_t units; /* free units left in page */
66191 - unsigned long pad[2];
66192 + unsigned long pad[1];
66193 + unsigned long size; /* size when >=PAGE_SIZE */
66194 slob_t *free; /* first free slob_t in page */
66195 struct list_head list; /* linked list of free pages */
66196 };
66197 @@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
66198 */
66199 static inline int is_slob_page(struct slob_page *sp)
66200 {
66201 - return PageSlab((struct page *)sp);
66202 + return PageSlab((struct page *)sp) && !sp->size;
66203 }
66204
66205 static inline void set_slob_page(struct slob_page *sp)
66206 @@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
66207
66208 static inline struct slob_page *slob_page(const void *addr)
66209 {
66210 - return (struct slob_page *)virt_to_page(addr);
66211 + return (struct slob_page *)virt_to_head_page(addr);
66212 }
66213
66214 /*
66215 @@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
66216 /*
66217 * Return the size of a slob block.
66218 */
66219 -static slobidx_t slob_units(slob_t *s)
66220 +static slobidx_t slob_units(const slob_t *s)
66221 {
66222 if (s->units > 0)
66223 return s->units;
66224 @@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
66225 /*
66226 * Return the next free slob block pointer after this one.
66227 */
66228 -static slob_t *slob_next(slob_t *s)
66229 +static slob_t *slob_next(const slob_t *s)
66230 {
66231 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
66232 slobidx_t next;
66233 @@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
66234 /*
66235 * Returns true if s is the last free block in its page.
66236 */
66237 -static int slob_last(slob_t *s)
66238 +static int slob_last(const slob_t *s)
66239 {
66240 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
66241 }
66242 @@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
66243 if (!page)
66244 return NULL;
66245
66246 + set_slob_page(page);
66247 return page_address(page);
66248 }
66249
66250 @@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
66251 if (!b)
66252 return NULL;
66253 sp = slob_page(b);
66254 - set_slob_page(sp);
66255
66256 spin_lock_irqsave(&slob_lock, flags);
66257 sp->units = SLOB_UNITS(PAGE_SIZE);
66258 sp->free = b;
66259 + sp->size = 0;
66260 INIT_LIST_HEAD(&sp->list);
66261 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
66262 set_slob_page_free(sp, slob_list);
66263 @@ -475,10 +478,9 @@ out:
66264 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
66265 #endif
66266
66267 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66268 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
66269 {
66270 - unsigned int *m;
66271 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66272 + slob_t *m;
66273 void *ret;
66274
66275 lockdep_trace_alloc(gfp);
66276 @@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
66277
66278 if (!m)
66279 return NULL;
66280 - *m = size;
66281 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
66282 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
66283 + m[0].units = size;
66284 + m[1].units = align;
66285 ret = (void *)m + align;
66286
66287 trace_kmalloc_node(_RET_IP_, ret,
66288 @@ -501,9 +506,9 @@ void *__kmalloc_node(size_t size, gfp_t
66289
66290 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
66291 if (ret) {
66292 - struct page *page;
66293 - page = virt_to_page(ret);
66294 - page->private = size;
66295 + struct slob_page *sp;
66296 + sp = slob_page(ret);
66297 + sp->size = size;
66298 }
66299
66300 trace_kmalloc_node(_RET_IP_, ret,
66301 @@ -513,6 +518,13 @@ void *__kmalloc_node(size_t size, gfp_t
66302 kmemleak_alloc(ret, size, 1, gfp);
66303 return ret;
66304 }
66305 +
66306 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
66307 +{
66308 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66309 +
66310 + return __kmalloc_node_align(size, gfp, node, align);
66311 +}
66312 EXPORT_SYMBOL(__kmalloc_node);
66313
66314 void kfree(const void *block)
66315 @@ -528,13 +540,81 @@ void kfree(const void *block)
66316 sp = slob_page(block);
66317 if (is_slob_page(sp)) {
66318 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66319 - unsigned int *m = (unsigned int *)(block - align);
66320 - slob_free(m, *m + align);
66321 - } else
66322 + slob_t *m = (slob_t *)(block - align);
66323 + slob_free(m, m[0].units + align);
66324 + } else {
66325 + clear_slob_page(sp);
66326 + free_slob_page(sp);
66327 + sp->size = 0;
66328 put_page(&sp->page);
66329 + }
66330 }
66331 EXPORT_SYMBOL(kfree);
66332
66333 +void check_object_size(const void *ptr, unsigned long n, bool to)
66334 +{
66335 +
66336 +#ifdef CONFIG_PAX_USERCOPY
66337 + struct slob_page *sp;
66338 + const slob_t *free;
66339 + const void *base;
66340 +
66341 + if (!n)
66342 + return;
66343 +
66344 + if (ZERO_OR_NULL_PTR(ptr))
66345 + goto report;
66346 +
66347 + if (!virt_addr_valid(ptr))
66348 + return;
66349 +
66350 + sp = slob_page(ptr);
66351 + if (!PageSlab((struct page*)sp)) {
66352 + if (object_is_on_stack(ptr, n) == -1)
66353 + goto report;
66354 + return;
66355 + }
66356 +
66357 + if (sp->size) {
66358 + base = page_address(&sp->page);
66359 + if (base <= ptr && n <= sp->size - (ptr - base))
66360 + return;
66361 + goto report;
66362 + }
66363 +
66364 + /* some tricky double walking to find the chunk */
66365 + base = (void *)((unsigned long)ptr & PAGE_MASK);
66366 + free = sp->free;
66367 +
66368 + while (!slob_last(free) && (void *)free <= ptr) {
66369 + base = free + slob_units(free);
66370 + free = slob_next(free);
66371 + }
66372 +
66373 + while (base < (void *)free) {
66374 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
66375 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
66376 + int offset;
66377 +
66378 + if (ptr < base + align)
66379 + goto report;
66380 +
66381 + offset = ptr - base - align;
66382 + if (offset < m) {
66383 + if (n <= m - offset)
66384 + return;
66385 + goto report;
66386 + }
66387 + base += size;
66388 + }
66389 +
66390 +report:
66391 + pax_report_usercopy(ptr, n, to, NULL);
66392 +#endif
66393 +
66394 +}
66395 +EXPORT_SYMBOL(check_object_size);
66396 +
66397 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
66398 size_t ksize(const void *block)
66399 {
66400 @@ -547,10 +627,10 @@ size_t ksize(const void *block)
66401 sp = slob_page(block);
66402 if (is_slob_page(sp)) {
66403 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
66404 - unsigned int *m = (unsigned int *)(block - align);
66405 - return SLOB_UNITS(*m) * SLOB_UNIT;
66406 + slob_t *m = (slob_t *)(block - align);
66407 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
66408 } else
66409 - return sp->page.private;
66410 + return sp->size;
66411 }
66412 EXPORT_SYMBOL(ksize);
66413
66414 @@ -605,17 +685,25 @@ void *kmem_cache_alloc_node(struct kmem_
66415 {
66416 void *b;
66417
66418 +#ifdef CONFIG_PAX_USERCOPY
66419 + b = __kmalloc_node_align(c->size, flags, node, c->align);
66420 +#else
66421 if (c->size < PAGE_SIZE) {
66422 b = slob_alloc(c->size, flags, c->align, node);
66423 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66424 SLOB_UNITS(c->size) * SLOB_UNIT,
66425 flags, node);
66426 } else {
66427 + struct slob_page *sp;
66428 +
66429 b = slob_new_pages(flags, get_order(c->size), node);
66430 + sp = slob_page(b);
66431 + sp->size = c->size;
66432 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
66433 PAGE_SIZE << get_order(c->size),
66434 flags, node);
66435 }
66436 +#endif
66437
66438 if (c->ctor)
66439 c->ctor(b);
66440 @@ -627,10 +715,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
66441
66442 static void __kmem_cache_free(void *b, int size)
66443 {
66444 - if (size < PAGE_SIZE)
66445 + struct slob_page *sp = slob_page(b);
66446 +
66447 + if (is_slob_page(sp))
66448 slob_free(b, size);
66449 - else
66450 + else {
66451 + clear_slob_page(sp);
66452 + free_slob_page(sp);
66453 + sp->size = 0;
66454 slob_free_pages(b, get_order(size));
66455 + }
66456 }
66457
66458 static void kmem_rcu_free(struct rcu_head *head)
66459 @@ -643,15 +737,24 @@ static void kmem_rcu_free(struct rcu_hea
66460
66461 void kmem_cache_free(struct kmem_cache *c, void *b)
66462 {
66463 + int size = c->size;
66464 +
66465 +#ifdef CONFIG_PAX_USERCOPY
66466 + if (size + c->align < PAGE_SIZE) {
66467 + size += c->align;
66468 + b -= c->align;
66469 + }
66470 +#endif
66471 +
66472 kmemleak_free_recursive(b, c->flags);
66473 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
66474 struct slob_rcu *slob_rcu;
66475 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
66476 + slob_rcu = b + (size - sizeof(struct slob_rcu));
66477 INIT_RCU_HEAD(&slob_rcu->head);
66478 - slob_rcu->size = c->size;
66479 + slob_rcu->size = size;
66480 call_rcu(&slob_rcu->head, kmem_rcu_free);
66481 } else {
66482 - __kmem_cache_free(b, c->size);
66483 + __kmem_cache_free(b, size);
66484 }
66485
66486 trace_kmem_cache_free(_RET_IP_, b);
66487 diff -urNp linux-2.6.32.42/mm/slub.c linux-2.6.32.42/mm/slub.c
66488 --- linux-2.6.32.42/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
66489 +++ linux-2.6.32.42/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
66490 @@ -410,7 +410,7 @@ static void print_track(const char *s, s
66491 if (!t->addr)
66492 return;
66493
66494 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
66495 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
66496 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
66497 }
66498
66499 @@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
66500
66501 page = virt_to_head_page(x);
66502
66503 + BUG_ON(!PageSlab(page));
66504 +
66505 slab_free(s, page, x, _RET_IP_);
66506
66507 trace_kmem_cache_free(_RET_IP_, x);
66508 @@ -1937,7 +1939,7 @@ static int slub_min_objects;
66509 * Merge control. If this is set then no merging of slab caches will occur.
66510 * (Could be removed. This was introduced to pacify the merge skeptics.)
66511 */
66512 -static int slub_nomerge;
66513 +static int slub_nomerge = 1;
66514
66515 /*
66516 * Calculate the order of allocation given an slab object size.
66517 @@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
66518 * list to avoid pounding the page allocator excessively.
66519 */
66520 set_min_partial(s, ilog2(s->size));
66521 - s->refcount = 1;
66522 + atomic_set(&s->refcount, 1);
66523 #ifdef CONFIG_NUMA
66524 s->remote_node_defrag_ratio = 1000;
66525 #endif
66526 @@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
66527 void kmem_cache_destroy(struct kmem_cache *s)
66528 {
66529 down_write(&slub_lock);
66530 - s->refcount--;
66531 - if (!s->refcount) {
66532 + if (atomic_dec_and_test(&s->refcount)) {
66533 list_del(&s->list);
66534 up_write(&slub_lock);
66535 if (kmem_cache_close(s)) {
66536 @@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
66537 __setup("slub_nomerge", setup_slub_nomerge);
66538
66539 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
66540 - const char *name, int size, gfp_t gfp_flags)
66541 + const char *name, int size, gfp_t gfp_flags, unsigned int flags)
66542 {
66543 - unsigned int flags = 0;
66544 -
66545 if (gfp_flags & SLUB_DMA)
66546 - flags = SLAB_CACHE_DMA;
66547 + flags |= SLAB_CACHE_DMA;
66548
66549 /*
66550 * This function is called with IRQs disabled during early-boot on
66551 @@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
66552 EXPORT_SYMBOL(__kmalloc_node);
66553 #endif
66554
66555 +void check_object_size(const void *ptr, unsigned long n, bool to)
66556 +{
66557 +
66558 +#ifdef CONFIG_PAX_USERCOPY
66559 + struct page *page;
66560 + struct kmem_cache *s = NULL;
66561 + unsigned long offset;
66562 +
66563 + if (!n)
66564 + return;
66565 +
66566 + if (ZERO_OR_NULL_PTR(ptr))
66567 + goto report;
66568 +
66569 + if (!virt_addr_valid(ptr))
66570 + return;
66571 +
66572 + page = get_object_page(ptr);
66573 +
66574 + if (!page) {
66575 + if (object_is_on_stack(ptr, n) == -1)
66576 + goto report;
66577 + return;
66578 + }
66579 +
66580 + s = page->slab;
66581 + if (!(s->flags & SLAB_USERCOPY))
66582 + goto report;
66583 +
66584 + offset = (ptr - page_address(page)) % s->size;
66585 + if (offset <= s->objsize && n <= s->objsize - offset)
66586 + return;
66587 +
66588 +report:
66589 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
66590 +#endif
66591 +
66592 +}
66593 +EXPORT_SYMBOL(check_object_size);
66594 +
66595 size_t ksize(const void *object)
66596 {
66597 struct page *page;
66598 @@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
66599 * kmem_cache_open for slab_state == DOWN.
66600 */
66601 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
66602 - sizeof(struct kmem_cache_node), GFP_NOWAIT);
66603 - kmalloc_caches[0].refcount = -1;
66604 + sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
66605 + atomic_set(&kmalloc_caches[0].refcount, -1);
66606 caches++;
66607
66608 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
66609 @@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
66610 /* Caches that are not of the two-to-the-power-of size */
66611 if (KMALLOC_MIN_SIZE <= 32) {
66612 create_kmalloc_cache(&kmalloc_caches[1],
66613 - "kmalloc-96", 96, GFP_NOWAIT);
66614 + "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
66615 caches++;
66616 }
66617 if (KMALLOC_MIN_SIZE <= 64) {
66618 create_kmalloc_cache(&kmalloc_caches[2],
66619 - "kmalloc-192", 192, GFP_NOWAIT);
66620 + "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
66621 caches++;
66622 }
66623
66624 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
66625 create_kmalloc_cache(&kmalloc_caches[i],
66626 - "kmalloc", 1 << i, GFP_NOWAIT);
66627 + "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
66628 caches++;
66629 }
66630
66631 @@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
66632 /*
66633 * We may have set a slab to be unmergeable during bootstrap.
66634 */
66635 - if (s->refcount < 0)
66636 + if (atomic_read(&s->refcount) < 0)
66637 return 1;
66638
66639 return 0;
66640 @@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
66641 if (s) {
66642 int cpu;
66643
66644 - s->refcount++;
66645 + atomic_inc(&s->refcount);
66646 /*
66647 * Adjust the object sizes so that we clear
66648 * the complete object on kzalloc.
66649 @@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
66650
66651 if (sysfs_slab_alias(s, name)) {
66652 down_write(&slub_lock);
66653 - s->refcount--;
66654 + atomic_dec(&s->refcount);
66655 up_write(&slub_lock);
66656 goto err;
66657 }
66658 @@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
66659
66660 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
66661 {
66662 - return sprintf(buf, "%d\n", s->refcount - 1);
66663 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
66664 }
66665 SLAB_ATTR_RO(aliases);
66666
66667 @@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
66668 kfree(s);
66669 }
66670
66671 -static struct sysfs_ops slab_sysfs_ops = {
66672 +static const struct sysfs_ops slab_sysfs_ops = {
66673 .show = slab_attr_show,
66674 .store = slab_attr_store,
66675 };
66676 @@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
66677 return 0;
66678 }
66679
66680 -static struct kset_uevent_ops slab_uevent_ops = {
66681 +static const struct kset_uevent_ops slab_uevent_ops = {
66682 .filter = uevent_filter,
66683 };
66684
66685 @@ -4785,7 +4824,13 @@ static const struct file_operations proc
66686
66687 static int __init slab_proc_init(void)
66688 {
66689 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
66690 + mode_t gr_mode = S_IRUGO;
66691 +
66692 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66693 + gr_mode = S_IRUSR;
66694 +#endif
66695 +
66696 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
66697 return 0;
66698 }
66699 module_init(slab_proc_init);
66700 diff -urNp linux-2.6.32.42/mm/util.c linux-2.6.32.42/mm/util.c
66701 --- linux-2.6.32.42/mm/util.c 2011-03-27 14:31:47.000000000 -0400
66702 +++ linux-2.6.32.42/mm/util.c 2011-04-17 15:56:46.000000000 -0400
66703 @@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
66704 void arch_pick_mmap_layout(struct mm_struct *mm)
66705 {
66706 mm->mmap_base = TASK_UNMAPPED_BASE;
66707 +
66708 +#ifdef CONFIG_PAX_RANDMMAP
66709 + if (mm->pax_flags & MF_PAX_RANDMMAP)
66710 + mm->mmap_base += mm->delta_mmap;
66711 +#endif
66712 +
66713 mm->get_unmapped_area = arch_get_unmapped_area;
66714 mm->unmap_area = arch_unmap_area;
66715 }
66716 diff -urNp linux-2.6.32.42/mm/vmalloc.c linux-2.6.32.42/mm/vmalloc.c
66717 --- linux-2.6.32.42/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
66718 +++ linux-2.6.32.42/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
66719 @@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
66720
66721 pte = pte_offset_kernel(pmd, addr);
66722 do {
66723 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
66724 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
66725 +
66726 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66727 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
66728 + BUG_ON(!pte_exec(*pte));
66729 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
66730 + continue;
66731 + }
66732 +#endif
66733 +
66734 + {
66735 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
66736 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
66737 + }
66738 } while (pte++, addr += PAGE_SIZE, addr != end);
66739 }
66740
66741 @@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
66742 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
66743 {
66744 pte_t *pte;
66745 + int ret = -ENOMEM;
66746
66747 /*
66748 * nr is a running index into the array which helps higher level
66749 @@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
66750 pte = pte_alloc_kernel(pmd, addr);
66751 if (!pte)
66752 return -ENOMEM;
66753 +
66754 + pax_open_kernel();
66755 do {
66756 struct page *page = pages[*nr];
66757
66758 - if (WARN_ON(!pte_none(*pte)))
66759 - return -EBUSY;
66760 - if (WARN_ON(!page))
66761 - return -ENOMEM;
66762 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66763 + if (!(pgprot_val(prot) & _PAGE_NX))
66764 + BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
66765 + else
66766 +#endif
66767 +
66768 + if (WARN_ON(!pte_none(*pte))) {
66769 + ret = -EBUSY;
66770 + goto out;
66771 + }
66772 + if (WARN_ON(!page)) {
66773 + ret = -ENOMEM;
66774 + goto out;
66775 + }
66776 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
66777 (*nr)++;
66778 } while (pte++, addr += PAGE_SIZE, addr != end);
66779 - return 0;
66780 + ret = 0;
66781 +out:
66782 + pax_close_kernel();
66783 + return ret;
66784 }
66785
66786 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
66787 @@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
66788 * and fall back on vmalloc() if that fails. Others
66789 * just put it in the vmalloc space.
66790 */
66791 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
66792 +#ifdef CONFIG_MODULES
66793 +#ifdef MODULES_VADDR
66794 unsigned long addr = (unsigned long)x;
66795 if (addr >= MODULES_VADDR && addr < MODULES_END)
66796 return 1;
66797 #endif
66798 +
66799 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
66800 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
66801 + return 1;
66802 +#endif
66803 +
66804 +#endif
66805 +
66806 return is_vmalloc_addr(x);
66807 }
66808
66809 @@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
66810
66811 if (!pgd_none(*pgd)) {
66812 pud_t *pud = pud_offset(pgd, addr);
66813 +#ifdef CONFIG_X86
66814 + if (!pud_large(*pud))
66815 +#endif
66816 if (!pud_none(*pud)) {
66817 pmd_t *pmd = pmd_offset(pud, addr);
66818 +#ifdef CONFIG_X86
66819 + if (!pmd_large(*pmd))
66820 +#endif
66821 if (!pmd_none(*pmd)) {
66822 pte_t *ptep, pte;
66823
66824 @@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
66825 struct rb_node *tmp;
66826
66827 while (*p) {
66828 - struct vmap_area *tmp;
66829 + struct vmap_area *varea;
66830
66831 parent = *p;
66832 - tmp = rb_entry(parent, struct vmap_area, rb_node);
66833 - if (va->va_start < tmp->va_end)
66834 + varea = rb_entry(parent, struct vmap_area, rb_node);
66835 + if (va->va_start < varea->va_end)
66836 p = &(*p)->rb_left;
66837 - else if (va->va_end > tmp->va_start)
66838 + else if (va->va_end > varea->va_start)
66839 p = &(*p)->rb_right;
66840 else
66841 BUG();
66842 @@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
66843 struct vm_struct *area;
66844
66845 BUG_ON(in_interrupt());
66846 +
66847 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66848 + if (flags & VM_KERNEXEC) {
66849 + if (start != VMALLOC_START || end != VMALLOC_END)
66850 + return NULL;
66851 + start = (unsigned long)MODULES_EXEC_VADDR;
66852 + end = (unsigned long)MODULES_EXEC_END;
66853 + }
66854 +#endif
66855 +
66856 if (flags & VM_IOREMAP) {
66857 int bit = fls(size);
66858
66859 @@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
66860 if (count > totalram_pages)
66861 return NULL;
66862
66863 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66864 + if (!(pgprot_val(prot) & _PAGE_NX))
66865 + flags |= VM_KERNEXEC;
66866 +#endif
66867 +
66868 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
66869 __builtin_return_address(0));
66870 if (!area)
66871 @@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
66872 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
66873 return NULL;
66874
66875 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
66876 + if (!(pgprot_val(prot) & _PAGE_NX))
66877 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
66878 + node, gfp_mask, caller);
66879 + else
66880 +#endif
66881 +
66882 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
66883 VMALLOC_END, node, gfp_mask, caller);
66884
66885 @@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
66886 return addr;
66887 }
66888
66889 +#undef __vmalloc
66890 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
66891 {
66892 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
66893 @@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
66894 * For tight control over page level allocator and protection flags
66895 * use __vmalloc() instead.
66896 */
66897 +#undef vmalloc
66898 void *vmalloc(unsigned long size)
66899 {
66900 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
66901 @@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
66902 * The resulting memory area is zeroed so it can be mapped to userspace
66903 * without leaking data.
66904 */
66905 +#undef vmalloc_user
66906 void *vmalloc_user(unsigned long size)
66907 {
66908 struct vm_struct *area;
66909 @@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
66910 * For tight control over page level allocator and protection flags
66911 * use __vmalloc() instead.
66912 */
66913 +#undef vmalloc_node
66914 void *vmalloc_node(unsigned long size, int node)
66915 {
66916 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
66917 @@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
66918 * For tight control over page level allocator and protection flags
66919 * use __vmalloc() instead.
66920 */
66921 -
66922 +#undef vmalloc_exec
66923 void *vmalloc_exec(unsigned long size)
66924 {
66925 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
66926 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
66927 -1, __builtin_return_address(0));
66928 }
66929
66930 @@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
66931 * Allocate enough 32bit PA addressable pages to cover @size from the
66932 * page level allocator and map them into contiguous kernel virtual space.
66933 */
66934 +#undef vmalloc_32
66935 void *vmalloc_32(unsigned long size)
66936 {
66937 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
66938 @@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
66939 * The resulting memory area is 32bit addressable and zeroed so it can be
66940 * mapped to userspace without leaking data.
66941 */
66942 +#undef vmalloc_32_user
66943 void *vmalloc_32_user(unsigned long size)
66944 {
66945 struct vm_struct *area;
66946 @@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
66947 unsigned long uaddr = vma->vm_start;
66948 unsigned long usize = vma->vm_end - vma->vm_start;
66949
66950 + BUG_ON(vma->vm_mirror);
66951 +
66952 if ((PAGE_SIZE-1) & (unsigned long)addr)
66953 return -EINVAL;
66954
66955 diff -urNp linux-2.6.32.42/mm/vmstat.c linux-2.6.32.42/mm/vmstat.c
66956 --- linux-2.6.32.42/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
66957 +++ linux-2.6.32.42/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
66958 @@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
66959 *
66960 * vm_stat contains the global counters
66961 */
66962 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66963 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
66964 EXPORT_SYMBOL(vm_stat);
66965
66966 #ifdef CONFIG_SMP
66967 @@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
66968 v = p->vm_stat_diff[i];
66969 p->vm_stat_diff[i] = 0;
66970 local_irq_restore(flags);
66971 - atomic_long_add(v, &zone->vm_stat[i]);
66972 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
66973 global_diff[i] += v;
66974 #ifdef CONFIG_NUMA
66975 /* 3 seconds idle till flush */
66976 @@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
66977
66978 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
66979 if (global_diff[i])
66980 - atomic_long_add(global_diff[i], &vm_stat[i]);
66981 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
66982 }
66983
66984 #endif
66985 @@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
66986 start_cpu_timer(cpu);
66987 #endif
66988 #ifdef CONFIG_PROC_FS
66989 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
66990 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
66991 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
66992 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
66993 + {
66994 + mode_t gr_mode = S_IRUGO;
66995 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
66996 + gr_mode = S_IRUSR;
66997 +#endif
66998 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
66999 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
67000 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67001 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
67002 +#else
67003 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
67004 +#endif
67005 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
67006 + }
67007 #endif
67008 return 0;
67009 }
67010 diff -urNp linux-2.6.32.42/net/8021q/vlan.c linux-2.6.32.42/net/8021q/vlan.c
67011 --- linux-2.6.32.42/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
67012 +++ linux-2.6.32.42/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
67013 @@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
67014 err = -EPERM;
67015 if (!capable(CAP_NET_ADMIN))
67016 break;
67017 - if ((args.u.name_type >= 0) &&
67018 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
67019 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
67020 struct vlan_net *vn;
67021
67022 vn = net_generic(net, vlan_net_id);
67023 diff -urNp linux-2.6.32.42/net/atm/atm_misc.c linux-2.6.32.42/net/atm/atm_misc.c
67024 --- linux-2.6.32.42/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
67025 +++ linux-2.6.32.42/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
67026 @@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
67027 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
67028 return 1;
67029 atm_return(vcc,truesize);
67030 - atomic_inc(&vcc->stats->rx_drop);
67031 + atomic_inc_unchecked(&vcc->stats->rx_drop);
67032 return 0;
67033 }
67034
67035 @@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
67036 }
67037 }
67038 atm_return(vcc,guess);
67039 - atomic_inc(&vcc->stats->rx_drop);
67040 + atomic_inc_unchecked(&vcc->stats->rx_drop);
67041 return NULL;
67042 }
67043
67044 @@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
67045
67046 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
67047 {
67048 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67049 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67050 __SONET_ITEMS
67051 #undef __HANDLE_ITEM
67052 }
67053 @@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
67054
67055 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
67056 {
67057 -#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
67058 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
67059 __SONET_ITEMS
67060 #undef __HANDLE_ITEM
67061 }
67062 diff -urNp linux-2.6.32.42/net/atm/mpoa_caches.c linux-2.6.32.42/net/atm/mpoa_caches.c
67063 --- linux-2.6.32.42/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
67064 +++ linux-2.6.32.42/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
67065 @@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
67066 struct timeval now;
67067 struct k_message msg;
67068
67069 + pax_track_stack();
67070 +
67071 do_gettimeofday(&now);
67072
67073 write_lock_irq(&client->egress_lock);
67074 diff -urNp linux-2.6.32.42/net/atm/proc.c linux-2.6.32.42/net/atm/proc.c
67075 --- linux-2.6.32.42/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
67076 +++ linux-2.6.32.42/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
67077 @@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
67078 const struct k_atm_aal_stats *stats)
67079 {
67080 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
67081 - atomic_read(&stats->tx),atomic_read(&stats->tx_err),
67082 - atomic_read(&stats->rx),atomic_read(&stats->rx_err),
67083 - atomic_read(&stats->rx_drop));
67084 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
67085 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
67086 + atomic_read_unchecked(&stats->rx_drop));
67087 }
67088
67089 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
67090 @@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
67091 {
67092 struct sock *sk = sk_atm(vcc);
67093
67094 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67095 + seq_printf(seq, "%p ", NULL);
67096 +#else
67097 seq_printf(seq, "%p ", vcc);
67098 +#endif
67099 +
67100 if (!vcc->dev)
67101 seq_printf(seq, "Unassigned ");
67102 else
67103 @@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
67104 {
67105 if (!vcc->dev)
67106 seq_printf(seq, sizeof(void *) == 4 ?
67107 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67108 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
67109 +#else
67110 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
67111 +#endif
67112 else
67113 seq_printf(seq, "%3d %3d %5d ",
67114 vcc->dev->number, vcc->vpi, vcc->vci);
67115 diff -urNp linux-2.6.32.42/net/atm/resources.c linux-2.6.32.42/net/atm/resources.c
67116 --- linux-2.6.32.42/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
67117 +++ linux-2.6.32.42/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
67118 @@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
67119 static void copy_aal_stats(struct k_atm_aal_stats *from,
67120 struct atm_aal_stats *to)
67121 {
67122 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67123 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67124 __AAL_STAT_ITEMS
67125 #undef __HANDLE_ITEM
67126 }
67127 @@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
67128 static void subtract_aal_stats(struct k_atm_aal_stats *from,
67129 struct atm_aal_stats *to)
67130 {
67131 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
67132 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
67133 __AAL_STAT_ITEMS
67134 #undef __HANDLE_ITEM
67135 }
67136 diff -urNp linux-2.6.32.42/net/bluetooth/l2cap.c linux-2.6.32.42/net/bluetooth/l2cap.c
67137 --- linux-2.6.32.42/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
67138 +++ linux-2.6.32.42/net/bluetooth/l2cap.c 2011-06-12 06:34:08.000000000 -0400
67139 @@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
67140 err = -ENOTCONN;
67141 break;
67142 }
67143 -
67144 + memset(&cinfo, 0, sizeof(cinfo));
67145 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
67146 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
67147
67148 diff -urNp linux-2.6.32.42/net/bluetooth/rfcomm/sock.c linux-2.6.32.42/net/bluetooth/rfcomm/sock.c
67149 --- linux-2.6.32.42/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
67150 +++ linux-2.6.32.42/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
67151 @@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
67152
67153 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
67154
67155 + memset(&cinfo, 0, sizeof(cinfo));
67156 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
67157 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
67158
67159 diff -urNp linux-2.6.32.42/net/bridge/br_private.h linux-2.6.32.42/net/bridge/br_private.h
67160 --- linux-2.6.32.42/net/bridge/br_private.h 2011-03-27 14:31:47.000000000 -0400
67161 +++ linux-2.6.32.42/net/bridge/br_private.h 2011-04-17 15:56:46.000000000 -0400
67162 @@ -254,7 +254,7 @@ extern void br_ifinfo_notify(int event,
67163
67164 #ifdef CONFIG_SYSFS
67165 /* br_sysfs_if.c */
67166 -extern struct sysfs_ops brport_sysfs_ops;
67167 +extern const struct sysfs_ops brport_sysfs_ops;
67168 extern int br_sysfs_addif(struct net_bridge_port *p);
67169
67170 /* br_sysfs_br.c */
67171 diff -urNp linux-2.6.32.42/net/bridge/br_stp_if.c linux-2.6.32.42/net/bridge/br_stp_if.c
67172 --- linux-2.6.32.42/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
67173 +++ linux-2.6.32.42/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
67174 @@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
67175 char *envp[] = { NULL };
67176
67177 if (br->stp_enabled == BR_USER_STP) {
67178 - r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
67179 + r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
67180 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
67181 br->dev->name, r);
67182
67183 diff -urNp linux-2.6.32.42/net/bridge/br_sysfs_if.c linux-2.6.32.42/net/bridge/br_sysfs_if.c
67184 --- linux-2.6.32.42/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
67185 +++ linux-2.6.32.42/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
67186 @@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
67187 return ret;
67188 }
67189
67190 -struct sysfs_ops brport_sysfs_ops = {
67191 +const struct sysfs_ops brport_sysfs_ops = {
67192 .show = brport_show,
67193 .store = brport_store,
67194 };
67195 diff -urNp linux-2.6.32.42/net/bridge/netfilter/ebtables.c linux-2.6.32.42/net/bridge/netfilter/ebtables.c
67196 --- linux-2.6.32.42/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
67197 +++ linux-2.6.32.42/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
67198 @@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
67199 unsigned int entries_size, nentries;
67200 char *entries;
67201
67202 + pax_track_stack();
67203 +
67204 if (cmd == EBT_SO_GET_ENTRIES) {
67205 entries_size = t->private->entries_size;
67206 nentries = t->private->nentries;
67207 diff -urNp linux-2.6.32.42/net/can/bcm.c linux-2.6.32.42/net/can/bcm.c
67208 --- linux-2.6.32.42/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
67209 +++ linux-2.6.32.42/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
67210 @@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
67211 struct bcm_sock *bo = bcm_sk(sk);
67212 struct bcm_op *op;
67213
67214 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67215 + seq_printf(m, ">>> socket %p", NULL);
67216 + seq_printf(m, " / sk %p", NULL);
67217 + seq_printf(m, " / bo %p", NULL);
67218 +#else
67219 seq_printf(m, ">>> socket %p", sk->sk_socket);
67220 seq_printf(m, " / sk %p", sk);
67221 seq_printf(m, " / bo %p", bo);
67222 +#endif
67223 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
67224 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
67225 seq_printf(m, " <<<\n");
67226 diff -urNp linux-2.6.32.42/net/core/dev.c linux-2.6.32.42/net/core/dev.c
67227 --- linux-2.6.32.42/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
67228 +++ linux-2.6.32.42/net/core/dev.c 2011-04-17 17:04:18.000000000 -0400
67229 @@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
67230 if (no_module && capable(CAP_NET_ADMIN))
67231 no_module = request_module("netdev-%s", name);
67232 if (no_module && capable(CAP_SYS_MODULE)) {
67233 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
67234 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
67235 +#else
67236 if (!request_module("%s", name))
67237 pr_err("Loading kernel module for a network device "
67238 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
67239 "instead\n", name);
67240 +#endif
67241 }
67242 }
67243 EXPORT_SYMBOL(dev_load);
67244 @@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
67245 }
67246 EXPORT_SYMBOL(netif_rx_ni);
67247
67248 -static void net_tx_action(struct softirq_action *h)
67249 +static void net_tx_action(void)
67250 {
67251 struct softnet_data *sd = &__get_cpu_var(softnet_data);
67252
67253 @@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
67254 EXPORT_SYMBOL(netif_napi_del);
67255
67256
67257 -static void net_rx_action(struct softirq_action *h)
67258 +static void net_rx_action(void)
67259 {
67260 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
67261 unsigned long time_limit = jiffies + 2;
67262 diff -urNp linux-2.6.32.42/net/core/flow.c linux-2.6.32.42/net/core/flow.c
67263 --- linux-2.6.32.42/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
67264 +++ linux-2.6.32.42/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
67265 @@ -35,11 +35,11 @@ struct flow_cache_entry {
67266 atomic_t *object_ref;
67267 };
67268
67269 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
67270 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
67271
67272 static u32 flow_hash_shift;
67273 #define flow_hash_size (1 << flow_hash_shift)
67274 -static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
67275 +static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
67276
67277 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
67278
67279 @@ -52,7 +52,7 @@ struct flow_percpu_info {
67280 u32 hash_rnd;
67281 int count;
67282 };
67283 -static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
67284 +static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
67285
67286 #define flow_hash_rnd_recalc(cpu) \
67287 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
67288 @@ -69,7 +69,7 @@ struct flow_flush_info {
67289 atomic_t cpuleft;
67290 struct completion completion;
67291 };
67292 -static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
67293 +static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
67294
67295 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
67296
67297 @@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
67298 if (fle->family == family &&
67299 fle->dir == dir &&
67300 flow_key_compare(key, &fle->key) == 0) {
67301 - if (fle->genid == atomic_read(&flow_cache_genid)) {
67302 + if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
67303 void *ret = fle->object;
67304
67305 if (ret)
67306 @@ -228,7 +228,7 @@ nocache:
67307 err = resolver(net, key, family, dir, &obj, &obj_ref);
67308
67309 if (fle && !err) {
67310 - fle->genid = atomic_read(&flow_cache_genid);
67311 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
67312
67313 if (fle->object)
67314 atomic_dec(fle->object_ref);
67315 @@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
67316
67317 fle = flow_table(cpu)[i];
67318 for (; fle; fle = fle->next) {
67319 - unsigned genid = atomic_read(&flow_cache_genid);
67320 + unsigned genid = atomic_read_unchecked(&flow_cache_genid);
67321
67322 if (!fle->object || fle->genid == genid)
67323 continue;
67324 diff -urNp linux-2.6.32.42/net/core/skbuff.c linux-2.6.32.42/net/core/skbuff.c
67325 --- linux-2.6.32.42/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
67326 +++ linux-2.6.32.42/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
67327 @@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
67328 struct sk_buff *frag_iter;
67329 struct sock *sk = skb->sk;
67330
67331 + pax_track_stack();
67332 +
67333 /*
67334 * __skb_splice_bits() only fails if the output has no room left,
67335 * so no point in going over the frag_list for the error case.
67336 diff -urNp linux-2.6.32.42/net/core/sock.c linux-2.6.32.42/net/core/sock.c
67337 --- linux-2.6.32.42/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
67338 +++ linux-2.6.32.42/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
67339 @@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
67340 break;
67341
67342 case SO_PEERCRED:
67343 + {
67344 + struct ucred peercred;
67345 if (len > sizeof(sk->sk_peercred))
67346 len = sizeof(sk->sk_peercred);
67347 - if (copy_to_user(optval, &sk->sk_peercred, len))
67348 + peercred = sk->sk_peercred;
67349 + if (copy_to_user(optval, &peercred, len))
67350 return -EFAULT;
67351 goto lenout;
67352 + }
67353
67354 case SO_PEERNAME:
67355 {
67356 @@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
67357 */
67358 smp_wmb();
67359 atomic_set(&sk->sk_refcnt, 1);
67360 - atomic_set(&sk->sk_drops, 0);
67361 + atomic_set_unchecked(&sk->sk_drops, 0);
67362 }
67363 EXPORT_SYMBOL(sock_init_data);
67364
67365 diff -urNp linux-2.6.32.42/net/decnet/sysctl_net_decnet.c linux-2.6.32.42/net/decnet/sysctl_net_decnet.c
67366 --- linux-2.6.32.42/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
67367 +++ linux-2.6.32.42/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
67368 @@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
67369
67370 if (len > *lenp) len = *lenp;
67371
67372 - if (copy_to_user(buffer, addr, len))
67373 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
67374 return -EFAULT;
67375
67376 *lenp = len;
67377 @@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
67378
67379 if (len > *lenp) len = *lenp;
67380
67381 - if (copy_to_user(buffer, devname, len))
67382 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
67383 return -EFAULT;
67384
67385 *lenp = len;
67386 diff -urNp linux-2.6.32.42/net/econet/Kconfig linux-2.6.32.42/net/econet/Kconfig
67387 --- linux-2.6.32.42/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
67388 +++ linux-2.6.32.42/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
67389 @@ -4,7 +4,7 @@
67390
67391 config ECONET
67392 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
67393 - depends on EXPERIMENTAL && INET
67394 + depends on EXPERIMENTAL && INET && BROKEN
67395 ---help---
67396 Econet is a fairly old and slow networking protocol mainly used by
67397 Acorn computers to access file and print servers. It uses native
67398 diff -urNp linux-2.6.32.42/net/ieee802154/dgram.c linux-2.6.32.42/net/ieee802154/dgram.c
67399 --- linux-2.6.32.42/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
67400 +++ linux-2.6.32.42/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
67401 @@ -318,7 +318,7 @@ out:
67402 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
67403 {
67404 if (sock_queue_rcv_skb(sk, skb) < 0) {
67405 - atomic_inc(&sk->sk_drops);
67406 + atomic_inc_unchecked(&sk->sk_drops);
67407 kfree_skb(skb);
67408 return NET_RX_DROP;
67409 }
67410 diff -urNp linux-2.6.32.42/net/ieee802154/raw.c linux-2.6.32.42/net/ieee802154/raw.c
67411 --- linux-2.6.32.42/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
67412 +++ linux-2.6.32.42/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
67413 @@ -206,7 +206,7 @@ out:
67414 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
67415 {
67416 if (sock_queue_rcv_skb(sk, skb) < 0) {
67417 - atomic_inc(&sk->sk_drops);
67418 + atomic_inc_unchecked(&sk->sk_drops);
67419 kfree_skb(skb);
67420 return NET_RX_DROP;
67421 }
67422 diff -urNp linux-2.6.32.42/net/ipv4/inet_diag.c linux-2.6.32.42/net/ipv4/inet_diag.c
67423 --- linux-2.6.32.42/net/ipv4/inet_diag.c 2011-04-17 17:00:52.000000000 -0400
67424 +++ linux-2.6.32.42/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
67425 @@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
67426 r->idiag_retrans = 0;
67427
67428 r->id.idiag_if = sk->sk_bound_dev_if;
67429 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67430 + r->id.idiag_cookie[0] = 0;
67431 + r->id.idiag_cookie[1] = 0;
67432 +#else
67433 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
67434 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
67435 +#endif
67436
67437 r->id.idiag_sport = inet->sport;
67438 r->id.idiag_dport = inet->dport;
67439 @@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
67440 r->idiag_family = tw->tw_family;
67441 r->idiag_retrans = 0;
67442 r->id.idiag_if = tw->tw_bound_dev_if;
67443 +
67444 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67445 + r->id.idiag_cookie[0] = 0;
67446 + r->id.idiag_cookie[1] = 0;
67447 +#else
67448 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
67449 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
67450 +#endif
67451 +
67452 r->id.idiag_sport = tw->tw_sport;
67453 r->id.idiag_dport = tw->tw_dport;
67454 r->id.idiag_src[0] = tw->tw_rcv_saddr;
67455 @@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
67456 if (sk == NULL)
67457 goto unlock;
67458
67459 +#ifndef CONFIG_GRKERNSEC_HIDESYM
67460 err = -ESTALE;
67461 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
67462 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
67463 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
67464 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
67465 goto out;
67466 +#endif
67467
67468 err = -ENOMEM;
67469 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
67470 @@ -436,7 +450,7 @@ static int valid_cc(const void *bc, int
67471 return 0;
67472 if (cc == len)
67473 return 1;
67474 - if (op->yes < 4)
67475 + if (op->yes < 4 || op->yes & 3)
67476 return 0;
67477 len -= op->yes;
67478 bc += op->yes;
67479 @@ -446,11 +460,11 @@ static int valid_cc(const void *bc, int
67480
67481 static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
67482 {
67483 - const unsigned char *bc = bytecode;
67484 + const void *bc = bytecode;
67485 int len = bytecode_len;
67486
67487 while (len > 0) {
67488 - struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc;
67489 + const struct inet_diag_bc_op *op = bc;
67490
67491 //printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
67492 switch (op->code) {
67493 @@ -461,22 +475,20 @@ static int inet_diag_bc_audit(const void
67494 case INET_DIAG_BC_S_LE:
67495 case INET_DIAG_BC_D_GE:
67496 case INET_DIAG_BC_D_LE:
67497 - if (op->yes < 4 || op->yes > len + 4)
67498 - return -EINVAL;
67499 case INET_DIAG_BC_JMP:
67500 - if (op->no < 4 || op->no > len + 4)
67501 + if (op->no < 4 || op->no > len + 4 || op->no & 3)
67502 return -EINVAL;
67503 if (op->no < len &&
67504 !valid_cc(bytecode, bytecode_len, len - op->no))
67505 return -EINVAL;
67506 break;
67507 case INET_DIAG_BC_NOP:
67508 - if (op->yes < 4 || op->yes > len + 4)
67509 - return -EINVAL;
67510 break;
67511 default:
67512 return -EINVAL;
67513 }
67514 + if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
67515 + return -EINVAL;
67516 bc += op->yes;
67517 len -= op->yes;
67518 }
67519 @@ -581,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
67520 r->idiag_retrans = req->retrans;
67521
67522 r->id.idiag_if = sk->sk_bound_dev_if;
67523 +
67524 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67525 + r->id.idiag_cookie[0] = 0;
67526 + r->id.idiag_cookie[1] = 0;
67527 +#else
67528 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
67529 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
67530 +#endif
67531
67532 tmo = req->expires - jiffies;
67533 if (tmo < 0)
67534 diff -urNp linux-2.6.32.42/net/ipv4/inet_hashtables.c linux-2.6.32.42/net/ipv4/inet_hashtables.c
67535 --- linux-2.6.32.42/net/ipv4/inet_hashtables.c 2011-03-27 14:31:47.000000000 -0400
67536 +++ linux-2.6.32.42/net/ipv4/inet_hashtables.c 2011-04-17 15:56:46.000000000 -0400
67537 @@ -18,11 +18,14 @@
67538 #include <linux/sched.h>
67539 #include <linux/slab.h>
67540 #include <linux/wait.h>
67541 +#include <linux/security.h>
67542
67543 #include <net/inet_connection_sock.h>
67544 #include <net/inet_hashtables.h>
67545 #include <net/ip.h>
67546
67547 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
67548 +
67549 /*
67550 * Allocate and initialize a new local port bind bucket.
67551 * The bindhash mutex for snum's hash chain must be held here.
67552 @@ -490,6 +493,8 @@ ok:
67553 }
67554 spin_unlock(&head->lock);
67555
67556 + gr_update_task_in_ip_table(current, inet_sk(sk));
67557 +
67558 if (tw) {
67559 inet_twsk_deschedule(tw, death_row);
67560 inet_twsk_put(tw);
67561 diff -urNp linux-2.6.32.42/net/ipv4/inetpeer.c linux-2.6.32.42/net/ipv4/inetpeer.c
67562 --- linux-2.6.32.42/net/ipv4/inetpeer.c 2011-03-27 14:31:47.000000000 -0400
67563 +++ linux-2.6.32.42/net/ipv4/inetpeer.c 2011-05-16 21:46:57.000000000 -0400
67564 @@ -366,6 +366,8 @@ struct inet_peer *inet_getpeer(__be32 da
67565 struct inet_peer *p, *n;
67566 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
67567
67568 + pax_track_stack();
67569 +
67570 /* Look up for the address quickly. */
67571 read_lock_bh(&peer_pool_lock);
67572 p = lookup(daddr, NULL);
67573 @@ -389,7 +391,7 @@ struct inet_peer *inet_getpeer(__be32 da
67574 return NULL;
67575 n->v4daddr = daddr;
67576 atomic_set(&n->refcnt, 1);
67577 - atomic_set(&n->rid, 0);
67578 + atomic_set_unchecked(&n->rid, 0);
67579 n->ip_id_count = secure_ip_id(daddr);
67580 n->tcp_ts_stamp = 0;
67581
67582 diff -urNp linux-2.6.32.42/net/ipv4/ip_fragment.c linux-2.6.32.42/net/ipv4/ip_fragment.c
67583 --- linux-2.6.32.42/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
67584 +++ linux-2.6.32.42/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
67585 @@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
67586 return 0;
67587
67588 start = qp->rid;
67589 - end = atomic_inc_return(&peer->rid);
67590 + end = atomic_inc_return_unchecked(&peer->rid);
67591 qp->rid = end;
67592
67593 rc = qp->q.fragments && (end - start) > max;
67594 diff -urNp linux-2.6.32.42/net/ipv4/ip_sockglue.c linux-2.6.32.42/net/ipv4/ip_sockglue.c
67595 --- linux-2.6.32.42/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
67596 +++ linux-2.6.32.42/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
67597 @@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
67598 int val;
67599 int len;
67600
67601 + pax_track_stack();
67602 +
67603 if (level != SOL_IP)
67604 return -EOPNOTSUPP;
67605
67606 diff -urNp linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c
67607 --- linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
67608 +++ linux-2.6.32.42/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
67609 @@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
67610 private = &tmp;
67611 }
67612 #endif
67613 + memset(&info, 0, sizeof(info));
67614 info.valid_hooks = t->valid_hooks;
67615 memcpy(info.hook_entry, private->hook_entry,
67616 sizeof(info.hook_entry));
67617 diff -urNp linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c
67618 --- linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
67619 +++ linux-2.6.32.42/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
67620 @@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
67621 private = &tmp;
67622 }
67623 #endif
67624 + memset(&info, 0, sizeof(info));
67625 info.valid_hooks = t->valid_hooks;
67626 memcpy(info.hook_entry, private->hook_entry,
67627 sizeof(info.hook_entry));
67628 diff -urNp linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c
67629 --- linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
67630 +++ linux-2.6.32.42/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
67631 @@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
67632
67633 *len = 0;
67634
67635 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
67636 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
67637 if (*octets == NULL) {
67638 if (net_ratelimit())
67639 printk("OOM in bsalg (%d)\n", __LINE__);
67640 diff -urNp linux-2.6.32.42/net/ipv4/raw.c linux-2.6.32.42/net/ipv4/raw.c
67641 --- linux-2.6.32.42/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
67642 +++ linux-2.6.32.42/net/ipv4/raw.c 2011-05-04 17:59:08.000000000 -0400
67643 @@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
67644 /* Charge it to the socket. */
67645
67646 if (sock_queue_rcv_skb(sk, skb) < 0) {
67647 - atomic_inc(&sk->sk_drops);
67648 + atomic_inc_unchecked(&sk->sk_drops);
67649 kfree_skb(skb);
67650 return NET_RX_DROP;
67651 }
67652 @@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
67653 int raw_rcv(struct sock *sk, struct sk_buff *skb)
67654 {
67655 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
67656 - atomic_inc(&sk->sk_drops);
67657 + atomic_inc_unchecked(&sk->sk_drops);
67658 kfree_skb(skb);
67659 return NET_RX_DROP;
67660 }
67661 @@ -724,15 +724,22 @@ static int raw_init(struct sock *sk)
67662
67663 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
67664 {
67665 + struct icmp_filter filter;
67666 +
67667 + if (optlen < 0)
67668 + return -EINVAL;
67669 if (optlen > sizeof(struct icmp_filter))
67670 optlen = sizeof(struct icmp_filter);
67671 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
67672 + if (copy_from_user(&filter, optval, optlen))
67673 return -EFAULT;
67674 + memcpy(&raw_sk(sk)->filter, &filter, optlen);
67675 +
67676 return 0;
67677 }
67678
67679 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
67680 {
67681 + struct icmp_filter filter;
67682 int len, ret = -EFAULT;
67683
67684 if (get_user(len, optlen))
67685 @@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
67686 if (len > sizeof(struct icmp_filter))
67687 len = sizeof(struct icmp_filter);
67688 ret = -EFAULT;
67689 + memcpy(&filter, &raw_sk(sk)->filter, len);
67690 if (put_user(len, optlen) ||
67691 - copy_to_user(optval, &raw_sk(sk)->filter, len))
67692 + copy_to_user(optval, &filter, len))
67693 goto out;
67694 ret = 0;
67695 out: return ret;
67696 @@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
67697 sk_wmem_alloc_get(sp),
67698 sk_rmem_alloc_get(sp),
67699 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
67700 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
67701 + atomic_read(&sp->sk_refcnt),
67702 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67703 + NULL,
67704 +#else
67705 + sp,
67706 +#endif
67707 + atomic_read_unchecked(&sp->sk_drops));
67708 }
67709
67710 static int raw_seq_show(struct seq_file *seq, void *v)
67711 diff -urNp linux-2.6.32.42/net/ipv4/route.c linux-2.6.32.42/net/ipv4/route.c
67712 --- linux-2.6.32.42/net/ipv4/route.c 2011-03-27 14:31:47.000000000 -0400
67713 +++ linux-2.6.32.42/net/ipv4/route.c 2011-05-04 17:56:28.000000000 -0400
67714 @@ -268,7 +268,7 @@ static inline unsigned int rt_hash(__be3
67715
67716 static inline int rt_genid(struct net *net)
67717 {
67718 - return atomic_read(&net->ipv4.rt_genid);
67719 + return atomic_read_unchecked(&net->ipv4.rt_genid);
67720 }
67721
67722 #ifdef CONFIG_PROC_FS
67723 @@ -888,7 +888,7 @@ static void rt_cache_invalidate(struct n
67724 unsigned char shuffle;
67725
67726 get_random_bytes(&shuffle, sizeof(shuffle));
67727 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
67728 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
67729 }
67730
67731 /*
67732 @@ -3356,7 +3356,7 @@ static __net_initdata struct pernet_oper
67733
67734 static __net_init int rt_secret_timer_init(struct net *net)
67735 {
67736 - atomic_set(&net->ipv4.rt_genid,
67737 + atomic_set_unchecked(&net->ipv4.rt_genid,
67738 (int) ((num_physpages ^ (num_physpages>>8)) ^
67739 (jiffies ^ (jiffies >> 7))));
67740
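Note on the route.c hunks above, and on the many similar conversions that follow: counters whose wrap-around is harmless (statistics such as sk_drops, or generation numbers such as rt_genid here) are switched from atomic_t to atomic_unchecked_t and the matching *_unchecked accessors, opting them out of the PaX REFCOUNT overflow trap. A rough model of the distinction is sketched below; the real types and per-architecture accessors are defined elsewhere in this patch, so this is an approximation rather than a quote.

    /* Sketch only: an _unchecked counter behaves like a plain atomic_t
     * without the overflow detection that REFCOUNT adds to atomic_t. */
    typedef struct { int counter; } atomic_unchecked_t;

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
            return v->counter;
    }

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            v->counter++;           /* plain increment, no saturation or trap */
    }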
67741 diff -urNp linux-2.6.32.42/net/ipv4/tcp.c linux-2.6.32.42/net/ipv4/tcp.c
67742 --- linux-2.6.32.42/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
67743 +++ linux-2.6.32.42/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
67744 @@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
67745 int val;
67746 int err = 0;
67747
67748 + pax_track_stack();
67749 +
67750 /* This is a string value all the others are int's */
67751 if (optname == TCP_CONGESTION) {
67752 char name[TCP_CA_NAME_MAX];
67753 @@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
67754 struct tcp_sock *tp = tcp_sk(sk);
67755 int val, len;
67756
67757 + pax_track_stack();
67758 +
67759 if (get_user(len, optlen))
67760 return -EFAULT;
67761
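Note on the pax_track_stack() calls added above and in later hunks: they mark functions with large on-stack footprints (the TCP setsockopt/getsockopt paths keep sizable locals) so the PaX stack-sanitization code knows how deep the stack was actually used. When the corresponding feature is disabled the call is expected to compile away to nothing; the fallback below is an assumption about the usual definition, not text taken from this patch.

    #ifdef CONFIG_PAX_MEMORY_STACKLEAK
    void pax_track_stack(void);             /* provided elsewhere in the patch */
    #else
    #define pax_track_stack() do {} while (0)
    #endif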
67762 diff -urNp linux-2.6.32.42/net/ipv4/tcp_ipv4.c linux-2.6.32.42/net/ipv4/tcp_ipv4.c
67763 --- linux-2.6.32.42/net/ipv4/tcp_ipv4.c 2011-03-27 14:31:47.000000000 -0400
67764 +++ linux-2.6.32.42/net/ipv4/tcp_ipv4.c 2011-04-17 15:56:46.000000000 -0400
67765 @@ -84,6 +84,9 @@
67766 int sysctl_tcp_tw_reuse __read_mostly;
67767 int sysctl_tcp_low_latency __read_mostly;
67768
67769 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67770 +extern int grsec_enable_blackhole;
67771 +#endif
67772
67773 #ifdef CONFIG_TCP_MD5SIG
67774 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
67775 @@ -1542,6 +1545,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
67776 return 0;
67777
67778 reset:
67779 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67780 + if (!grsec_enable_blackhole)
67781 +#endif
67782 tcp_v4_send_reset(rsk, skb);
67783 discard:
67784 kfree_skb(skb);
67785 @@ -1603,12 +1609,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
67786 TCP_SKB_CB(skb)->sacked = 0;
67787
67788 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
67789 - if (!sk)
67790 + if (!sk) {
67791 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67792 + ret = 1;
67793 +#endif
67794 goto no_tcp_socket;
67795 + }
67796
67797 process:
67798 - if (sk->sk_state == TCP_TIME_WAIT)
67799 + if (sk->sk_state == TCP_TIME_WAIT) {
67800 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67801 + ret = 2;
67802 +#endif
67803 goto do_time_wait;
67804 + }
67805
67806 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
67807 goto discard_and_relse;
67808 @@ -1650,6 +1664,10 @@ no_tcp_socket:
67809 bad_packet:
67810 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
67811 } else {
67812 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67813 + if (!grsec_enable_blackhole || (ret == 1 &&
67814 + (skb->dev->flags & IFF_LOOPBACK)))
67815 +#endif
67816 tcp_v4_send_reset(NULL, skb);
67817 }
67818
67819 @@ -2237,7 +2255,11 @@ static void get_openreq4(struct sock *sk
67820 0, /* non standard timer */
67821 0, /* open_requests have no inode */
67822 atomic_read(&sk->sk_refcnt),
67823 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67824 + NULL,
67825 +#else
67826 req,
67827 +#endif
67828 len);
67829 }
67830
67831 @@ -2279,7 +2301,12 @@ static void get_tcp4_sock(struct sock *s
67832 sock_i_uid(sk),
67833 icsk->icsk_probes_out,
67834 sock_i_ino(sk),
67835 - atomic_read(&sk->sk_refcnt), sk,
67836 + atomic_read(&sk->sk_refcnt),
67837 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67838 + NULL,
67839 +#else
67840 + sk,
67841 +#endif
67842 jiffies_to_clock_t(icsk->icsk_rto),
67843 jiffies_to_clock_t(icsk->icsk_ack.ato),
67844 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
67845 @@ -2307,7 +2334,13 @@ static void get_timewait4_sock(struct in
67846 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
67847 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
67848 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
67849 - atomic_read(&tw->tw_refcnt), tw, len);
67850 + atomic_read(&tw->tw_refcnt),
67851 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67852 + NULL,
67853 +#else
67854 + tw,
67855 +#endif
67856 + len);
67857 }
67858
67859 #define TMPSZ 150
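Two independent hardening patterns appear in the tcp_ipv4.c hunks above. GRKERNSEC_BLACKHOLE suppresses the TCP reset normally sent for packets that match no socket, unless the feature is off or the packet arrived over loopback (ret == 1 flags the no-socket path, ret == 2 the TIME_WAIT path). GRKERNSEC_HIDESYM substitutes NULL for the kernel pointers that /proc/net/tcp would otherwise print, so seq_show output no longer leaks object addresses. A compact, self-contained model of the reset gate is sketched below; the names are stand-ins for the kernel symbols.

    #include <stdbool.h>
    #include <stdio.h>

    static bool blackhole = true;           /* grsec_enable_blackhole stand-in */

    static void maybe_send_reset(bool no_socket, bool from_loopback)
    {
            if (!blackhole || (no_socket && from_loopback))
                    printf("RST sent\n");   /* tcp_v4_send_reset() in the kernel */
            else
                    printf("silently dropped\n");
    }

    int main(void)
    {
            maybe_send_reset(true, false);  /* unknown remote probe: dropped    */
            maybe_send_reset(true, true);   /* loopback probe: reset still sent */
            return 0;
    }

The IPv6 TCP and both UDP receive paths later in this patch apply the same gate to their reset and ICMP unreachable responses.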
67860 diff -urNp linux-2.6.32.42/net/ipv4/tcp_minisocks.c linux-2.6.32.42/net/ipv4/tcp_minisocks.c
67861 --- linux-2.6.32.42/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
67862 +++ linux-2.6.32.42/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
67863 @@ -26,6 +26,10 @@
67864 #include <net/inet_common.h>
67865 #include <net/xfrm.h>
67866
67867 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67868 +extern int grsec_enable_blackhole;
67869 +#endif
67870 +
67871 #ifdef CONFIG_SYSCTL
67872 #define SYNC_INIT 0 /* let the user enable it */
67873 #else
67874 @@ -672,6 +676,10 @@ listen_overflow:
67875
67876 embryonic_reset:
67877 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
67878 +
67879 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67880 + if (!grsec_enable_blackhole)
67881 +#endif
67882 if (!(flg & TCP_FLAG_RST))
67883 req->rsk_ops->send_reset(sk, skb);
67884
67885 diff -urNp linux-2.6.32.42/net/ipv4/tcp_output.c linux-2.6.32.42/net/ipv4/tcp_output.c
67886 --- linux-2.6.32.42/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
67887 +++ linux-2.6.32.42/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
67888 @@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
67889 __u8 *md5_hash_location;
67890 int mss;
67891
67892 + pax_track_stack();
67893 +
67894 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
67895 if (skb == NULL)
67896 return NULL;
67897 diff -urNp linux-2.6.32.42/net/ipv4/tcp_probe.c linux-2.6.32.42/net/ipv4/tcp_probe.c
67898 --- linux-2.6.32.42/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
67899 +++ linux-2.6.32.42/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
67900 @@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
67901 if (cnt + width >= len)
67902 break;
67903
67904 - if (copy_to_user(buf + cnt, tbuf, width))
67905 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
67906 return -EFAULT;
67907 cnt += width;
67908 }
67909 diff -urNp linux-2.6.32.42/net/ipv4/tcp_timer.c linux-2.6.32.42/net/ipv4/tcp_timer.c
67910 --- linux-2.6.32.42/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
67911 +++ linux-2.6.32.42/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
67912 @@ -21,6 +21,10 @@
67913 #include <linux/module.h>
67914 #include <net/tcp.h>
67915
67916 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67917 +extern int grsec_lastack_retries;
67918 +#endif
67919 +
67920 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
67921 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
67922 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
67923 @@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
67924 }
67925 }
67926
67927 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67928 + if ((sk->sk_state == TCP_LAST_ACK) &&
67929 + (grsec_lastack_retries > 0) &&
67930 + (grsec_lastack_retries < retry_until))
67931 + retry_until = grsec_lastack_retries;
67932 +#endif
67933 +
67934 if (retransmits_timed_out(sk, retry_until)) {
67935 /* Has it gone just too far? */
67936 tcp_write_err(sk);
67937 diff -urNp linux-2.6.32.42/net/ipv4/udp.c linux-2.6.32.42/net/ipv4/udp.c
67938 --- linux-2.6.32.42/net/ipv4/udp.c 2011-03-27 14:31:47.000000000 -0400
67939 +++ linux-2.6.32.42/net/ipv4/udp.c 2011-05-04 17:57:28.000000000 -0400
67940 @@ -86,6 +86,7 @@
67941 #include <linux/types.h>
67942 #include <linux/fcntl.h>
67943 #include <linux/module.h>
67944 +#include <linux/security.h>
67945 #include <linux/socket.h>
67946 #include <linux/sockios.h>
67947 #include <linux/igmp.h>
67948 @@ -106,6 +107,10 @@
67949 #include <net/xfrm.h>
67950 #include "udp_impl.h"
67951
67952 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
67953 +extern int grsec_enable_blackhole;
67954 +#endif
67955 +
67956 struct udp_table udp_table;
67957 EXPORT_SYMBOL(udp_table);
67958
67959 @@ -371,6 +376,9 @@ found:
67960 return s;
67961 }
67962
67963 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
67964 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
67965 +
67966 /*
67967 * This routine is called by the ICMP module when it gets some
67968 * sort of error condition. If err < 0 then the socket should
67969 @@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
67970 dport = usin->sin_port;
67971 if (dport == 0)
67972 return -EINVAL;
67973 +
67974 + err = gr_search_udp_sendmsg(sk, usin);
67975 + if (err)
67976 + return err;
67977 } else {
67978 if (sk->sk_state != TCP_ESTABLISHED)
67979 return -EDESTADDRREQ;
67980 +
67981 + err = gr_search_udp_sendmsg(sk, NULL);
67982 + if (err)
67983 + return err;
67984 +
67985 daddr = inet->daddr;
67986 dport = inet->dport;
67987 /* Open fast path for connected socket.
67988 @@ -945,6 +962,10 @@ try_again:
67989 if (!skb)
67990 goto out;
67991
67992 + err = gr_search_udp_recvmsg(sk, skb);
67993 + if (err)
67994 + goto out_free;
67995 +
67996 ulen = skb->len - sizeof(struct udphdr);
67997 copied = len;
67998 if (copied > ulen)
67999 @@ -1065,7 +1086,7 @@ static int __udp_queue_rcv_skb(struct so
68000 if (rc == -ENOMEM) {
68001 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
68002 is_udplite);
68003 - atomic_inc(&sk->sk_drops);
68004 + atomic_inc_unchecked(&sk->sk_drops);
68005 }
68006 goto drop;
68007 }
68008 @@ -1335,6 +1356,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
68009 goto csum_error;
68010
68011 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
68012 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68013 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68014 +#endif
68015 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
68016
68017 /*
68018 @@ -1755,8 +1779,13 @@ static void udp4_format_sock(struct sock
68019 sk_wmem_alloc_get(sp),
68020 sk_rmem_alloc_get(sp),
68021 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68022 - atomic_read(&sp->sk_refcnt), sp,
68023 - atomic_read(&sp->sk_drops), len);
68024 + atomic_read(&sp->sk_refcnt),
68025 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68026 + NULL,
68027 +#else
68028 + sp,
68029 +#endif
68030 + atomic_read_unchecked(&sp->sk_drops), len);
68031 }
68032
68033 int udp4_seq_show(struct seq_file *seq, void *v)
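Note on the udp.c hunks above: gr_search_udp_sendmsg() and gr_search_udp_recvmsg() are RBAC hooks in the style of an LSM check; they return 0 to allow the datagram or a negative errno to veto it before any payload is copied. The sketch below shows only that calling convention; the policy query name and the errno chosen are hypothetical, since the real decision lives in the RBAC code elsewhere in this patch.

    #include <errno.h>
    #include <stdbool.h>

    /* Hypothetical policy query standing in for the RBAC lookup. */
    static bool policy_allows_udp_send(void) { return true; }

    static int gr_search_udp_sendmsg_sketch(void)
    {
            return policy_allows_udp_send() ? 0 : -EACCES;
    }

    int udp_sendmsg_sketch(void)
    {
            int err = gr_search_udp_sendmsg_sketch();
            if (err)
                    return err;         /* veto before touching the payload */
            /* ... build and transmit the datagram ... */
            return 0;
    }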
68034 diff -urNp linux-2.6.32.42/net/ipv6/inet6_connection_sock.c linux-2.6.32.42/net/ipv6/inet6_connection_sock.c
68035 --- linux-2.6.32.42/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
68036 +++ linux-2.6.32.42/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
68037 @@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
68038 #ifdef CONFIG_XFRM
68039 {
68040 struct rt6_info *rt = (struct rt6_info *)dst;
68041 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
68042 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
68043 }
68044 #endif
68045 }
68046 @@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
68047 #ifdef CONFIG_XFRM
68048 if (dst) {
68049 struct rt6_info *rt = (struct rt6_info *)dst;
68050 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
68051 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
68052 sk->sk_dst_cache = NULL;
68053 dst_release(dst);
68054 dst = NULL;
68055 diff -urNp linux-2.6.32.42/net/ipv6/inet6_hashtables.c linux-2.6.32.42/net/ipv6/inet6_hashtables.c
68056 --- linux-2.6.32.42/net/ipv6/inet6_hashtables.c 2011-03-27 14:31:47.000000000 -0400
68057 +++ linux-2.6.32.42/net/ipv6/inet6_hashtables.c 2011-05-04 17:56:28.000000000 -0400
68058 @@ -118,7 +118,7 @@ out:
68059 }
68060 EXPORT_SYMBOL(__inet6_lookup_established);
68061
68062 -static int inline compute_score(struct sock *sk, struct net *net,
68063 +static inline int compute_score(struct sock *sk, struct net *net,
68064 const unsigned short hnum,
68065 const struct in6_addr *daddr,
68066 const int dif)
68067 diff -urNp linux-2.6.32.42/net/ipv6/ipv6_sockglue.c linux-2.6.32.42/net/ipv6/ipv6_sockglue.c
68068 --- linux-2.6.32.42/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
68069 +++ linux-2.6.32.42/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
68070 @@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
68071 int val, valbool;
68072 int retv = -ENOPROTOOPT;
68073
68074 + pax_track_stack();
68075 +
68076 if (optval == NULL)
68077 val=0;
68078 else {
68079 @@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
68080 int len;
68081 int val;
68082
68083 + pax_track_stack();
68084 +
68085 if (ip6_mroute_opt(optname))
68086 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
68087
68088 diff -urNp linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c
68089 --- linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
68090 +++ linux-2.6.32.42/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
68091 @@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
68092 private = &tmp;
68093 }
68094 #endif
68095 + memset(&info, 0, sizeof(info));
68096 info.valid_hooks = t->valid_hooks;
68097 memcpy(info.hook_entry, private->hook_entry,
68098 sizeof(info.hook_entry));
68099 diff -urNp linux-2.6.32.42/net/ipv6/raw.c linux-2.6.32.42/net/ipv6/raw.c
68100 --- linux-2.6.32.42/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
68101 +++ linux-2.6.32.42/net/ipv6/raw.c 2011-05-16 21:46:57.000000000 -0400
68102 @@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
68103 {
68104 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
68105 skb_checksum_complete(skb)) {
68106 - atomic_inc(&sk->sk_drops);
68107 + atomic_inc_unchecked(&sk->sk_drops);
68108 kfree_skb(skb);
68109 return NET_RX_DROP;
68110 }
68111
68112 /* Charge it to the socket. */
68113 if (sock_queue_rcv_skb(sk,skb)<0) {
68114 - atomic_inc(&sk->sk_drops);
68115 + atomic_inc_unchecked(&sk->sk_drops);
68116 kfree_skb(skb);
68117 return NET_RX_DROP;
68118 }
68119 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68120 struct raw6_sock *rp = raw6_sk(sk);
68121
68122 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
68123 - atomic_inc(&sk->sk_drops);
68124 + atomic_inc_unchecked(&sk->sk_drops);
68125 kfree_skb(skb);
68126 return NET_RX_DROP;
68127 }
68128 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68129
68130 if (inet->hdrincl) {
68131 if (skb_checksum_complete(skb)) {
68132 - atomic_inc(&sk->sk_drops);
68133 + atomic_inc_unchecked(&sk->sk_drops);
68134 kfree_skb(skb);
68135 return NET_RX_DROP;
68136 }
68137 @@ -518,7 +518,7 @@ csum_copy_err:
68138 as some normal condition.
68139 */
68140 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
68141 - atomic_inc(&sk->sk_drops);
68142 + atomic_inc_unchecked(&sk->sk_drops);
68143 goto out;
68144 }
68145
68146 @@ -600,7 +600,7 @@ out:
68147 return err;
68148 }
68149
68150 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
68151 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
68152 struct flowi *fl, struct rt6_info *rt,
68153 unsigned int flags)
68154 {
68155 @@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
68156 u16 proto;
68157 int err;
68158
68159 + pax_track_stack();
68160 +
68161 /* Rough check on arithmetic overflow,
68162 better check is made in ip6_append_data().
68163 */
68164 @@ -916,12 +918,17 @@ do_confirm:
68165 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
68166 char __user *optval, int optlen)
68167 {
68168 + struct icmp6_filter filter;
68169 +
68170 switch (optname) {
68171 case ICMPV6_FILTER:
68172 + if (optlen < 0)
68173 + return -EINVAL;
68174 if (optlen > sizeof(struct icmp6_filter))
68175 optlen = sizeof(struct icmp6_filter);
68176 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
68177 + if (copy_from_user(&filter, optval, optlen))
68178 return -EFAULT;
68179 + memcpy(&raw6_sk(sk)->filter, &filter, optlen);
68180 return 0;
68181 default:
68182 return -ENOPROTOOPT;
68183 @@ -933,6 +940,7 @@ static int rawv6_seticmpfilter(struct so
68184 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
68185 char __user *optval, int __user *optlen)
68186 {
68187 + struct icmp6_filter filter;
68188 int len;
68189
68190 switch (optname) {
68191 @@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
68192 len = sizeof(struct icmp6_filter);
68193 if (put_user(len, optlen))
68194 return -EFAULT;
68195 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
68196 + memcpy(&filter, &raw6_sk(sk)->filter, len);
68197 + if (copy_to_user(optval, &filter, len))
68198 return -EFAULT;
68199 return 0;
68200 default:
68201 @@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
68202 0, 0L, 0,
68203 sock_i_uid(sp), 0,
68204 sock_i_ino(sp),
68205 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68206 + atomic_read(&sp->sk_refcnt),
68207 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68208 + NULL,
68209 +#else
68210 + sp,
68211 +#endif
68212 + atomic_read_unchecked(&sp->sk_drops));
68213 }
68214
68215 static int raw6_seq_show(struct seq_file *seq, void *v)
68216 diff -urNp linux-2.6.32.42/net/ipv6/tcp_ipv6.c linux-2.6.32.42/net/ipv6/tcp_ipv6.c
68217 --- linux-2.6.32.42/net/ipv6/tcp_ipv6.c 2011-03-27 14:31:47.000000000 -0400
68218 +++ linux-2.6.32.42/net/ipv6/tcp_ipv6.c 2011-04-17 15:56:46.000000000 -0400
68219 @@ -88,6 +88,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
68220 }
68221 #endif
68222
68223 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68224 +extern int grsec_enable_blackhole;
68225 +#endif
68226 +
68227 static void tcp_v6_hash(struct sock *sk)
68228 {
68229 if (sk->sk_state != TCP_CLOSE) {
68230 @@ -1578,6 +1582,9 @@ static int tcp_v6_do_rcv(struct sock *sk
68231 return 0;
68232
68233 reset:
68234 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68235 + if (!grsec_enable_blackhole)
68236 +#endif
68237 tcp_v6_send_reset(sk, skb);
68238 discard:
68239 if (opt_skb)
68240 @@ -1655,12 +1662,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
68241 TCP_SKB_CB(skb)->sacked = 0;
68242
68243 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68244 - if (!sk)
68245 + if (!sk) {
68246 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68247 + ret = 1;
68248 +#endif
68249 goto no_tcp_socket;
68250 + }
68251
68252 process:
68253 - if (sk->sk_state == TCP_TIME_WAIT)
68254 + if (sk->sk_state == TCP_TIME_WAIT) {
68255 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68256 + ret = 2;
68257 +#endif
68258 goto do_time_wait;
68259 + }
68260
68261 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
68262 goto discard_and_relse;
68263 @@ -1700,6 +1715,10 @@ no_tcp_socket:
68264 bad_packet:
68265 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
68266 } else {
68267 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68268 + if (!grsec_enable_blackhole || (ret == 1 &&
68269 + (skb->dev->flags & IFF_LOOPBACK)))
68270 +#endif
68271 tcp_v6_send_reset(NULL, skb);
68272 }
68273
68274 @@ -1915,7 +1934,13 @@ static void get_openreq6(struct seq_file
68275 uid,
68276 0, /* non standard timer */
68277 0, /* open_requests have no inode */
68278 - 0, req);
68279 + 0,
68280 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68281 + NULL
68282 +#else
68283 + req
68284 +#endif
68285 + );
68286 }
68287
68288 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
68289 @@ -1965,7 +1990,12 @@ static void get_tcp6_sock(struct seq_fil
68290 sock_i_uid(sp),
68291 icsk->icsk_probes_out,
68292 sock_i_ino(sp),
68293 - atomic_read(&sp->sk_refcnt), sp,
68294 + atomic_read(&sp->sk_refcnt),
68295 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68296 + NULL,
68297 +#else
68298 + sp,
68299 +#endif
68300 jiffies_to_clock_t(icsk->icsk_rto),
68301 jiffies_to_clock_t(icsk->icsk_ack.ato),
68302 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
68303 @@ -2000,7 +2030,13 @@ static void get_timewait6_sock(struct se
68304 dest->s6_addr32[2], dest->s6_addr32[3], destp,
68305 tw->tw_substate, 0, 0,
68306 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68307 - atomic_read(&tw->tw_refcnt), tw);
68308 + atomic_read(&tw->tw_refcnt),
68309 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68310 + NULL
68311 +#else
68312 + tw
68313 +#endif
68314 + );
68315 }
68316
68317 static int tcp6_seq_show(struct seq_file *seq, void *v)
68318 diff -urNp linux-2.6.32.42/net/ipv6/udp.c linux-2.6.32.42/net/ipv6/udp.c
68319 --- linux-2.6.32.42/net/ipv6/udp.c 2011-03-27 14:31:47.000000000 -0400
68320 +++ linux-2.6.32.42/net/ipv6/udp.c 2011-05-04 17:58:16.000000000 -0400
68321 @@ -49,6 +49,10 @@
68322 #include <linux/seq_file.h>
68323 #include "udp_impl.h"
68324
68325 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68326 +extern int grsec_enable_blackhole;
68327 +#endif
68328 +
68329 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
68330 {
68331 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
68332 @@ -388,7 +392,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
68333 if (rc == -ENOMEM) {
68334 UDP6_INC_STATS_BH(sock_net(sk),
68335 UDP_MIB_RCVBUFERRORS, is_udplite);
68336 - atomic_inc(&sk->sk_drops);
68337 + atomic_inc_unchecked(&sk->sk_drops);
68338 }
68339 goto drop;
68340 }
68341 @@ -587,6 +591,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
68342 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
68343 proto == IPPROTO_UDPLITE);
68344
68345 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68346 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68347 +#endif
68348 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
68349
68350 kfree_skb(skb);
68351 @@ -1206,8 +1213,13 @@ static void udp6_sock_seq_show(struct se
68352 0, 0L, 0,
68353 sock_i_uid(sp), 0,
68354 sock_i_ino(sp),
68355 - atomic_read(&sp->sk_refcnt), sp,
68356 - atomic_read(&sp->sk_drops));
68357 + atomic_read(&sp->sk_refcnt),
68358 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68359 + NULL,
68360 +#else
68361 + sp,
68362 +#endif
68363 + atomic_read_unchecked(&sp->sk_drops));
68364 }
68365
68366 int udp6_seq_show(struct seq_file *seq, void *v)
68367 diff -urNp linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c
68368 --- linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
68369 +++ linux-2.6.32.42/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
68370 @@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
68371 add_wait_queue(&self->open_wait, &wait);
68372
68373 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
68374 - __FILE__,__LINE__, tty->driver->name, self->open_count );
68375 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68376
68377 /* As far as I can see, we protect open_count - Jean II */
68378 spin_lock_irqsave(&self->spinlock, flags);
68379 if (!tty_hung_up_p(filp)) {
68380 extra_count = 1;
68381 - self->open_count--;
68382 + local_dec(&self->open_count);
68383 }
68384 spin_unlock_irqrestore(&self->spinlock, flags);
68385 - self->blocked_open++;
68386 + local_inc(&self->blocked_open);
68387
68388 while (1) {
68389 if (tty->termios->c_cflag & CBAUD) {
68390 @@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
68391 }
68392
68393 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
68394 - __FILE__,__LINE__, tty->driver->name, self->open_count );
68395 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
68396
68397 schedule();
68398 }
68399 @@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
68400 if (extra_count) {
68401 /* ++ is not atomic, so this should be protected - Jean II */
68402 spin_lock_irqsave(&self->spinlock, flags);
68403 - self->open_count++;
68404 + local_inc(&self->open_count);
68405 spin_unlock_irqrestore(&self->spinlock, flags);
68406 }
68407 - self->blocked_open--;
68408 + local_dec(&self->blocked_open);
68409
68410 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
68411 - __FILE__,__LINE__, tty->driver->name, self->open_count);
68412 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
68413
68414 if (!retval)
68415 self->flags |= ASYNC_NORMAL_ACTIVE;
68416 @@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
68417 }
68418 /* ++ is not atomic, so this should be protected - Jean II */
68419 spin_lock_irqsave(&self->spinlock, flags);
68420 - self->open_count++;
68421 + local_inc(&self->open_count);
68422
68423 tty->driver_data = self;
68424 self->tty = tty;
68425 spin_unlock_irqrestore(&self->spinlock, flags);
68426
68427 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
68428 - self->line, self->open_count);
68429 + self->line, local_read(&self->open_count));
68430
68431 /* Not really used by us, but lets do it anyway */
68432 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
68433 @@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
68434 return;
68435 }
68436
68437 - if ((tty->count == 1) && (self->open_count != 1)) {
68438 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
68439 /*
68440 * Uh, oh. tty->count is 1, which means that the tty
68441 * structure will be freed. state->count should always
68442 @@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
68443 */
68444 IRDA_DEBUG(0, "%s(), bad serial port count; "
68445 "tty->count is 1, state->count is %d\n", __func__ ,
68446 - self->open_count);
68447 - self->open_count = 1;
68448 + local_read(&self->open_count));
68449 + local_set(&self->open_count, 1);
68450 }
68451
68452 - if (--self->open_count < 0) {
68453 + if (local_dec_return(&self->open_count) < 0) {
68454 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
68455 - __func__, self->line, self->open_count);
68456 - self->open_count = 0;
68457 + __func__, self->line, local_read(&self->open_count));
68458 + local_set(&self->open_count, 0);
68459 }
68460 - if (self->open_count) {
68461 + if (local_read(&self->open_count)) {
68462 spin_unlock_irqrestore(&self->spinlock, flags);
68463
68464 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
68465 @@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
68466 tty->closing = 0;
68467 self->tty = NULL;
68468
68469 - if (self->blocked_open) {
68470 + if (local_read(&self->blocked_open)) {
68471 if (self->close_delay)
68472 schedule_timeout_interruptible(self->close_delay);
68473 wake_up_interruptible(&self->open_wait);
68474 @@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
68475 spin_lock_irqsave(&self->spinlock, flags);
68476 self->flags &= ~ASYNC_NORMAL_ACTIVE;
68477 self->tty = NULL;
68478 - self->open_count = 0;
68479 + local_set(&self->open_count, 0);
68480 spin_unlock_irqrestore(&self->spinlock, flags);
68481
68482 wake_up_interruptible(&self->open_wait);
68483 @@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
68484 seq_putc(m, '\n');
68485
68486 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
68487 - seq_printf(m, "Open count: %d\n", self->open_count);
68488 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
68489 seq_printf(m, "Max data size: %d\n", self->max_data_size);
68490 seq_printf(m, "Max header size: %d\n", self->max_header_size);
68491
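Note on the ircomm_tty.c hunks above: open_count and blocked_open move from plain ints to local_t, read and updated through local_read()/local_inc()/local_dec_return(), so the counters cannot tear or be miscounted on the paths that touch them without the spinlock. The same conversion is applied to mac80211's local->open_count later in this patch. As a rough user-space analogue (C11 atomics standing in for the kernel's cheaper per-CPU local_t operations, which is an approximation):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int open_count = 0;

    int main(void)
    {
            atomic_fetch_add(&open_count, 1);               /* local_inc()        */
            if (atomic_fetch_sub(&open_count, 1) - 1 < 0)   /* local_dec_return() */
                    atomic_store(&open_count, 0);           /* local_set(..., 0)  */
            printf("open_count=%d\n", atomic_load(&open_count));
            return 0;
    }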
68492 diff -urNp linux-2.6.32.42/net/iucv/af_iucv.c linux-2.6.32.42/net/iucv/af_iucv.c
68493 --- linux-2.6.32.42/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
68494 +++ linux-2.6.32.42/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
68495 @@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
68496
68497 write_lock_bh(&iucv_sk_list.lock);
68498
68499 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
68500 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
68501 while (__iucv_get_sock_by_name(name)) {
68502 sprintf(name, "%08x",
68503 - atomic_inc_return(&iucv_sk_list.autobind_name));
68504 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
68505 }
68506
68507 write_unlock_bh(&iucv_sk_list.lock);
68508 diff -urNp linux-2.6.32.42/net/key/af_key.c linux-2.6.32.42/net/key/af_key.c
68509 --- linux-2.6.32.42/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
68510 +++ linux-2.6.32.42/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
68511 @@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
68512 struct xfrm_migrate m[XFRM_MAX_DEPTH];
68513 struct xfrm_kmaddress k;
68514
68515 + pax_track_stack();
68516 +
68517 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
68518 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
68519 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
68520 @@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
68521 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
68522 else
68523 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
68524 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68525 + NULL,
68526 +#else
68527 s,
68528 +#endif
68529 atomic_read(&s->sk_refcnt),
68530 sk_rmem_alloc_get(s),
68531 sk_wmem_alloc_get(s),
68532 diff -urNp linux-2.6.32.42/net/mac80211/cfg.c linux-2.6.32.42/net/mac80211/cfg.c
68533 --- linux-2.6.32.42/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
68534 +++ linux-2.6.32.42/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
68535 @@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
68536 return err;
68537 }
68538
68539 -struct cfg80211_ops mac80211_config_ops = {
68540 +const struct cfg80211_ops mac80211_config_ops = {
68541 .add_virtual_intf = ieee80211_add_iface,
68542 .del_virtual_intf = ieee80211_del_iface,
68543 .change_virtual_intf = ieee80211_change_iface,
68544 diff -urNp linux-2.6.32.42/net/mac80211/cfg.h linux-2.6.32.42/net/mac80211/cfg.h
68545 --- linux-2.6.32.42/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
68546 +++ linux-2.6.32.42/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
68547 @@ -4,6 +4,6 @@
68548 #ifndef __CFG_H
68549 #define __CFG_H
68550
68551 -extern struct cfg80211_ops mac80211_config_ops;
68552 +extern const struct cfg80211_ops mac80211_config_ops;
68553
68554 #endif /* __CFG_H */
68555 diff -urNp linux-2.6.32.42/net/mac80211/debugfs_key.c linux-2.6.32.42/net/mac80211/debugfs_key.c
68556 --- linux-2.6.32.42/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
68557 +++ linux-2.6.32.42/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
68558 @@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
68559 size_t count, loff_t *ppos)
68560 {
68561 struct ieee80211_key *key = file->private_data;
68562 - int i, res, bufsize = 2 * key->conf.keylen + 2;
68563 + int i, bufsize = 2 * key->conf.keylen + 2;
68564 char *buf = kmalloc(bufsize, GFP_KERNEL);
68565 char *p = buf;
68566 + ssize_t res;
68567 +
68568 + if (buf == NULL)
68569 + return -ENOMEM;
68570
68571 for (i = 0; i < key->conf.keylen; i++)
68572 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
68573 diff -urNp linux-2.6.32.42/net/mac80211/debugfs_sta.c linux-2.6.32.42/net/mac80211/debugfs_sta.c
68574 --- linux-2.6.32.42/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
68575 +++ linux-2.6.32.42/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
68576 @@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
68577 int i;
68578 struct sta_info *sta = file->private_data;
68579
68580 + pax_track_stack();
68581 +
68582 spin_lock_bh(&sta->lock);
68583 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
68584 sta->ampdu_mlme.dialog_token_allocator + 1);
68585 diff -urNp linux-2.6.32.42/net/mac80211/ieee80211_i.h linux-2.6.32.42/net/mac80211/ieee80211_i.h
68586 --- linux-2.6.32.42/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
68587 +++ linux-2.6.32.42/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
68588 @@ -25,6 +25,7 @@
68589 #include <linux/etherdevice.h>
68590 #include <net/cfg80211.h>
68591 #include <net/mac80211.h>
68592 +#include <asm/local.h>
68593 #include "key.h"
68594 #include "sta_info.h"
68595
68596 @@ -635,7 +636,7 @@ struct ieee80211_local {
68597 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
68598 spinlock_t queue_stop_reason_lock;
68599
68600 - int open_count;
68601 + local_t open_count;
68602 int monitors, cooked_mntrs;
68603 /* number of interfaces with corresponding FIF_ flags */
68604 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
68605 diff -urNp linux-2.6.32.42/net/mac80211/iface.c linux-2.6.32.42/net/mac80211/iface.c
68606 --- linux-2.6.32.42/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
68607 +++ linux-2.6.32.42/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
68608 @@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
68609 break;
68610 }
68611
68612 - if (local->open_count == 0) {
68613 + if (local_read(&local->open_count) == 0) {
68614 res = drv_start(local);
68615 if (res)
68616 goto err_del_bss;
68617 @@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
68618 * Validate the MAC address for this device.
68619 */
68620 if (!is_valid_ether_addr(dev->dev_addr)) {
68621 - if (!local->open_count)
68622 + if (!local_read(&local->open_count))
68623 drv_stop(local);
68624 return -EADDRNOTAVAIL;
68625 }
68626 @@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
68627
68628 hw_reconf_flags |= __ieee80211_recalc_idle(local);
68629
68630 - local->open_count++;
68631 + local_inc(&local->open_count);
68632 if (hw_reconf_flags) {
68633 ieee80211_hw_config(local, hw_reconf_flags);
68634 /*
68635 @@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
68636 err_del_interface:
68637 drv_remove_interface(local, &conf);
68638 err_stop:
68639 - if (!local->open_count)
68640 + if (!local_read(&local->open_count))
68641 drv_stop(local);
68642 err_del_bss:
68643 sdata->bss = NULL;
68644 @@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
68645 WARN_ON(!list_empty(&sdata->u.ap.vlans));
68646 }
68647
68648 - local->open_count--;
68649 + local_dec(&local->open_count);
68650
68651 switch (sdata->vif.type) {
68652 case NL80211_IFTYPE_AP_VLAN:
68653 @@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
68654
68655 ieee80211_recalc_ps(local, -1);
68656
68657 - if (local->open_count == 0) {
68658 + if (local_read(&local->open_count) == 0) {
68659 ieee80211_clear_tx_pending(local);
68660 ieee80211_stop_device(local);
68661
68662 diff -urNp linux-2.6.32.42/net/mac80211/main.c linux-2.6.32.42/net/mac80211/main.c
68663 --- linux-2.6.32.42/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
68664 +++ linux-2.6.32.42/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
68665 @@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
68666 local->hw.conf.power_level = power;
68667 }
68668
68669 - if (changed && local->open_count) {
68670 + if (changed && local_read(&local->open_count)) {
68671 ret = drv_config(local, changed);
68672 /*
68673 * Goal:
68674 diff -urNp linux-2.6.32.42/net/mac80211/mlme.c linux-2.6.32.42/net/mac80211/mlme.c
68675 --- linux-2.6.32.42/net/mac80211/mlme.c 2011-03-27 14:31:47.000000000 -0400
68676 +++ linux-2.6.32.42/net/mac80211/mlme.c 2011-05-16 21:46:57.000000000 -0400
68677 @@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
68678 bool have_higher_than_11mbit = false, newsta = false;
68679 u16 ap_ht_cap_flags;
68680
68681 + pax_track_stack();
68682 +
68683 /*
68684 * AssocResp and ReassocResp have identical structure, so process both
68685 * of them in this function.
68686 diff -urNp linux-2.6.32.42/net/mac80211/pm.c linux-2.6.32.42/net/mac80211/pm.c
68687 --- linux-2.6.32.42/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
68688 +++ linux-2.6.32.42/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
68689 @@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
68690 }
68691
68692 /* stop hardware - this must stop RX */
68693 - if (local->open_count)
68694 + if (local_read(&local->open_count))
68695 ieee80211_stop_device(local);
68696
68697 local->suspended = true;
68698 diff -urNp linux-2.6.32.42/net/mac80211/rate.c linux-2.6.32.42/net/mac80211/rate.c
68699 --- linux-2.6.32.42/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
68700 +++ linux-2.6.32.42/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
68701 @@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
68702 struct rate_control_ref *ref, *old;
68703
68704 ASSERT_RTNL();
68705 - if (local->open_count)
68706 + if (local_read(&local->open_count))
68707 return -EBUSY;
68708
68709 ref = rate_control_alloc(name, local);
68710 diff -urNp linux-2.6.32.42/net/mac80211/tx.c linux-2.6.32.42/net/mac80211/tx.c
68711 --- linux-2.6.32.42/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
68712 +++ linux-2.6.32.42/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
68713 @@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
68714 return cpu_to_le16(dur);
68715 }
68716
68717 -static int inline is_ieee80211_device(struct ieee80211_local *local,
68718 +static inline int is_ieee80211_device(struct ieee80211_local *local,
68719 struct net_device *dev)
68720 {
68721 return local == wdev_priv(dev->ieee80211_ptr);
68722 diff -urNp linux-2.6.32.42/net/mac80211/util.c linux-2.6.32.42/net/mac80211/util.c
68723 --- linux-2.6.32.42/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
68724 +++ linux-2.6.32.42/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
68725 @@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
68726 local->resuming = true;
68727
68728 /* restart hardware */
68729 - if (local->open_count) {
68730 + if (local_read(&local->open_count)) {
68731 /*
68732 * Upon resume hardware can sometimes be goofy due to
68733 * various platform / driver / bus issues, so restarting
68734 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c
68735 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
68736 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
68737 @@ -564,7 +564,7 @@ static const struct file_operations ip_v
68738 .open = ip_vs_app_open,
68739 .read = seq_read,
68740 .llseek = seq_lseek,
68741 - .release = seq_release,
68742 + .release = seq_release_net,
68743 };
68744 #endif
68745
68746 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c
68747 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
68748 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
68749 @@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
68750 /* if the connection is not template and is created
68751 * by sync, preserve the activity flag.
68752 */
68753 - cp->flags |= atomic_read(&dest->conn_flags) &
68754 + cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
68755 (~IP_VS_CONN_F_INACTIVE);
68756 else
68757 - cp->flags |= atomic_read(&dest->conn_flags);
68758 + cp->flags |= atomic_read_unchecked(&dest->conn_flags);
68759 cp->dest = dest;
68760
68761 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
68762 @@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
68763 atomic_set(&cp->refcnt, 1);
68764
68765 atomic_set(&cp->n_control, 0);
68766 - atomic_set(&cp->in_pkts, 0);
68767 + atomic_set_unchecked(&cp->in_pkts, 0);
68768
68769 atomic_inc(&ip_vs_conn_count);
68770 if (flags & IP_VS_CONN_F_NO_CPORT)
68771 @@ -871,7 +871,7 @@ static const struct file_operations ip_v
68772 .open = ip_vs_conn_open,
68773 .read = seq_read,
68774 .llseek = seq_lseek,
68775 - .release = seq_release,
68776 + .release = seq_release_net,
68777 };
68778
68779 static const char *ip_vs_origin_name(unsigned flags)
68780 @@ -934,7 +934,7 @@ static const struct file_operations ip_v
68781 .open = ip_vs_conn_sync_open,
68782 .read = seq_read,
68783 .llseek = seq_lseek,
68784 - .release = seq_release,
68785 + .release = seq_release_net,
68786 };
68787
68788 #endif
68789 @@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
68790
68791 /* Don't drop the entry if its number of incoming packets is not
68792 located in [0, 8] */
68793 - i = atomic_read(&cp->in_pkts);
68794 + i = atomic_read_unchecked(&cp->in_pkts);
68795 if (i > 8 || i < 0) return 0;
68796
68797 if (!todrop_rate[i]) return 0;
68798 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c
68799 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
68800 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
68801 @@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
68802 ret = cp->packet_xmit(skb, cp, pp);
68803 /* do not touch skb anymore */
68804
68805 - atomic_inc(&cp->in_pkts);
68806 + atomic_inc_unchecked(&cp->in_pkts);
68807 ip_vs_conn_put(cp);
68808 return ret;
68809 }
68810 @@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
68811 * Sync connection if it is about to close to
68812 * encorage the standby servers to update the connections timeout
68813 */
68814 - pkts = atomic_add_return(1, &cp->in_pkts);
68815 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
68816 if (af == AF_INET &&
68817 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
68818 (((cp->protocol != IPPROTO_TCP ||
68819 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c
68820 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
68821 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
68822 @@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
68823 ip_vs_rs_hash(dest);
68824 write_unlock_bh(&__ip_vs_rs_lock);
68825 }
68826 - atomic_set(&dest->conn_flags, conn_flags);
68827 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
68828
68829 /* bind the service */
68830 if (!dest->svc) {
68831 @@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
68832 " %-7s %-6d %-10d %-10d\n",
68833 &dest->addr.in6,
68834 ntohs(dest->port),
68835 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
68836 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
68837 atomic_read(&dest->weight),
68838 atomic_read(&dest->activeconns),
68839 atomic_read(&dest->inactconns));
68840 @@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
68841 "%-7s %-6d %-10d %-10d\n",
68842 ntohl(dest->addr.ip),
68843 ntohs(dest->port),
68844 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
68845 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
68846 atomic_read(&dest->weight),
68847 atomic_read(&dest->activeconns),
68848 atomic_read(&dest->inactconns));
68849 @@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
68850 .open = ip_vs_info_open,
68851 .read = seq_read,
68852 .llseek = seq_lseek,
68853 - .release = seq_release_private,
68854 + .release = seq_release_net,
68855 };
68856
68857 #endif
68858 @@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
68859 .open = ip_vs_stats_seq_open,
68860 .read = seq_read,
68861 .llseek = seq_lseek,
68862 - .release = single_release,
68863 + .release = single_release_net,
68864 };
68865
68866 #endif
68867 @@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
68868
68869 entry.addr = dest->addr.ip;
68870 entry.port = dest->port;
68871 - entry.conn_flags = atomic_read(&dest->conn_flags);
68872 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
68873 entry.weight = atomic_read(&dest->weight);
68874 entry.u_threshold = dest->u_threshold;
68875 entry.l_threshold = dest->l_threshold;
68876 @@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
68877 unsigned char arg[128];
68878 int ret = 0;
68879
68880 + pax_track_stack();
68881 +
68882 if (!capable(CAP_NET_ADMIN))
68883 return -EPERM;
68884
68885 @@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
68886 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
68887
68888 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
68889 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
68890 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
68891 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
68892 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
68893 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
68894 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c
68895 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
68896 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
68897 @@ -438,7 +438,7 @@ static void ip_vs_process_message(const
68898
68899 if (opt)
68900 memcpy(&cp->in_seq, opt, sizeof(*opt));
68901 - atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
68902 + atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
68903 cp->state = state;
68904 cp->old_state = cp->state;
68905 /*
68906 diff -urNp linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c
68907 --- linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
68908 +++ linux-2.6.32.42/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
68909 @@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
68910 else
68911 rc = NF_ACCEPT;
68912 /* do not touch skb anymore */
68913 - atomic_inc(&cp->in_pkts);
68914 + atomic_inc_unchecked(&cp->in_pkts);
68915 goto out;
68916 }
68917
68918 @@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
68919 else
68920 rc = NF_ACCEPT;
68921 /* do not touch skb anymore */
68922 - atomic_inc(&cp->in_pkts);
68923 + atomic_inc_unchecked(&cp->in_pkts);
68924 goto out;
68925 }
68926
68927 diff -urNp linux-2.6.32.42/net/netfilter/Kconfig linux-2.6.32.42/net/netfilter/Kconfig
68928 --- linux-2.6.32.42/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
68929 +++ linux-2.6.32.42/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
68930 @@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
68931
68932 To compile it as a module, choose M here. If unsure, say N.
68933
68934 +config NETFILTER_XT_MATCH_GRADM
68935 + tristate '"gradm" match support'
68936 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
68937 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
68938 + ---help---
68939 + The gradm match allows to match on grsecurity RBAC being enabled.
68940 + It is useful when iptables rules are applied early on bootup to
68941 + prevent connections to the machine (except from a trusted host)
68942 + while the RBAC system is disabled.
68943 +
68944 config NETFILTER_XT_MATCH_HASHLIMIT
68945 tristate '"hashlimit" match support'
68946 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
68947 diff -urNp linux-2.6.32.42/net/netfilter/Makefile linux-2.6.32.42/net/netfilter/Makefile
68948 --- linux-2.6.32.42/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
68949 +++ linux-2.6.32.42/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
68950 @@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
68951 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
68952 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
68953 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
68954 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
68955 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
68956 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
68957 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
68958 diff -urNp linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c
68959 --- linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
68960 +++ linux-2.6.32.42/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
68961 @@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
68962 static int
68963 ctnetlink_parse_tuple(const struct nlattr * const cda[],
68964 struct nf_conntrack_tuple *tuple,
68965 - enum ctattr_tuple type, u_int8_t l3num)
68966 + enum ctattr_type type, u_int8_t l3num)
68967 {
68968 struct nlattr *tb[CTA_TUPLE_MAX+1];
68969 int err;
68970 diff -urNp linux-2.6.32.42/net/netfilter/nfnetlink_log.c linux-2.6.32.42/net/netfilter/nfnetlink_log.c
68971 --- linux-2.6.32.42/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
68972 +++ linux-2.6.32.42/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
68973 @@ -68,7 +68,7 @@ struct nfulnl_instance {
68974 };
68975
68976 static DEFINE_RWLOCK(instances_lock);
68977 -static atomic_t global_seq;
68978 +static atomic_unchecked_t global_seq;
68979
68980 #define INSTANCE_BUCKETS 16
68981 static struct hlist_head instance_table[INSTANCE_BUCKETS];
68982 @@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
68983 /* global sequence number */
68984 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
68985 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
68986 - htonl(atomic_inc_return(&global_seq)));
68987 + htonl(atomic_inc_return_unchecked(&global_seq)));
68988
68989 if (data_len) {
68990 struct nlattr *nla;
68991 diff -urNp linux-2.6.32.42/net/netfilter/xt_gradm.c linux-2.6.32.42/net/netfilter/xt_gradm.c
68992 --- linux-2.6.32.42/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
68993 +++ linux-2.6.32.42/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
68994 @@ -0,0 +1,51 @@
68995 +/*
68996 + * gradm match for netfilter
68997 + * Copyright © Zbigniew Krzystolik, 2010
68998 + *
68999 + * This program is free software; you can redistribute it and/or modify
69000 + * it under the terms of the GNU General Public License; either version
69001 + * 2 or 3 as published by the Free Software Foundation.
69002 + */
69003 +#include <linux/module.h>
69004 +#include <linux/moduleparam.h>
69005 +#include <linux/skbuff.h>
69006 +#include <linux/netfilter/x_tables.h>
69007 +#include <linux/grsecurity.h>
69008 +#include <linux/netfilter/xt_gradm.h>
69009 +
69010 +static bool
69011 +gradm_mt(const struct sk_buff *skb, const struct xt_match_param *par)
69012 +{
69013 + const struct xt_gradm_mtinfo *info = par->matchinfo;
69014 + bool retval = false;
69015 + if (gr_acl_is_enabled())
69016 + retval = true;
69017 + return retval ^ info->invflags;
69018 +}
69019 +
69020 +static struct xt_match gradm_mt_reg __read_mostly = {
69021 + .name = "gradm",
69022 + .revision = 0,
69023 + .family = NFPROTO_UNSPEC,
69024 + .match = gradm_mt,
69025 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
69026 + .me = THIS_MODULE,
69027 +};
69028 +
69029 +static int __init gradm_mt_init(void)
69030 +{
69031 + return xt_register_match(&gradm_mt_reg);
69032 +}
69033 +
69034 +static void __exit gradm_mt_exit(void)
69035 +{
69036 + xt_unregister_match(&gradm_mt_reg);
69037 +}
69038 +
69039 +module_init(gradm_mt_init);
69040 +module_exit(gradm_mt_exit);
69041 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
69042 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
69043 +MODULE_LICENSE("GPL");
69044 +MODULE_ALIAS("ipt_gradm");
69045 +MODULE_ALIAS("ip6t_gradm");
69046 diff -urNp linux-2.6.32.42/net/netlink/af_netlink.c linux-2.6.32.42/net/netlink/af_netlink.c
69047 --- linux-2.6.32.42/net/netlink/af_netlink.c 2011-03-27 14:31:47.000000000 -0400
69048 +++ linux-2.6.32.42/net/netlink/af_netlink.c 2011-05-04 17:56:28.000000000 -0400
69049 @@ -733,7 +733,7 @@ static void netlink_overrun(struct sock
69050 sk->sk_error_report(sk);
69051 }
69052 }
69053 - atomic_inc(&sk->sk_drops);
69054 + atomic_inc_unchecked(&sk->sk_drops);
69055 }
69056
69057 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
69058 @@ -1964,15 +1964,23 @@ static int netlink_seq_show(struct seq_f
69059 struct netlink_sock *nlk = nlk_sk(s);
69060
69061 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d\n",
69062 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69063 + NULL,
69064 +#else
69065 s,
69066 +#endif
69067 s->sk_protocol,
69068 nlk->pid,
69069 nlk->groups ? (u32)nlk->groups[0] : 0,
69070 sk_rmem_alloc_get(s),
69071 sk_wmem_alloc_get(s),
69072 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69073 + NULL,
69074 +#else
69075 nlk->cb,
69076 +#endif
69077 atomic_read(&s->sk_refcnt),
69078 - atomic_read(&s->sk_drops)
69079 + atomic_read_unchecked(&s->sk_drops)
69080 );
69081
69082 }
69083 diff -urNp linux-2.6.32.42/net/netrom/af_netrom.c linux-2.6.32.42/net/netrom/af_netrom.c
69084 --- linux-2.6.32.42/net/netrom/af_netrom.c 2011-03-27 14:31:47.000000000 -0400
69085 +++ linux-2.6.32.42/net/netrom/af_netrom.c 2011-04-17 15:56:46.000000000 -0400
69086 @@ -838,6 +838,7 @@ static int nr_getname(struct socket *soc
69087 struct sock *sk = sock->sk;
69088 struct nr_sock *nr = nr_sk(sk);
69089
69090 + memset(sax, 0, sizeof(*sax));
69091 lock_sock(sk);
69092 if (peer != 0) {
69093 if (sk->sk_state != TCP_ESTABLISHED) {
69094 @@ -852,7 +853,6 @@ static int nr_getname(struct socket *soc
69095 *uaddr_len = sizeof(struct full_sockaddr_ax25);
69096 } else {
69097 sax->fsa_ax25.sax25_family = AF_NETROM;
69098 - sax->fsa_ax25.sax25_ndigis = 0;
69099 sax->fsa_ax25.sax25_call = nr->source_addr;
69100 *uaddr_len = sizeof(struct sockaddr_ax25);
69101 }
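Note on the af_netrom.c hunk above: nr_getname() now zeroes the whole sockaddr before filling it, so structure padding and any unset members never leak stale kernel stack bytes back to userspace, and the explicit sax25_ndigis = 0 becomes redundant. A small user-space analogue of the pattern, with illustrative names:

    #include <string.h>
    #include <sys/socket.h>

    void fill_addr(struct sockaddr_storage *ss, size_t *len)
    {
            memset(ss, 0, sizeof(*ss));     /* clear padding and unused fields */
            ss->ss_family = AF_UNSPEC;      /* then set only the valid members */
            *len = sizeof(*ss);
    }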
69102 diff -urNp linux-2.6.32.42/net/packet/af_packet.c linux-2.6.32.42/net/packet/af_packet.c
69103 --- linux-2.6.32.42/net/packet/af_packet.c 2011-04-17 17:00:52.000000000 -0400
69104 +++ linux-2.6.32.42/net/packet/af_packet.c 2011-04-17 15:56:46.000000000 -0400
69105 @@ -2427,7 +2427,11 @@ static int packet_seq_show(struct seq_fi
69106
69107 seq_printf(seq,
69108 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
69109 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69110 + NULL,
69111 +#else
69112 s,
69113 +#endif
69114 atomic_read(&s->sk_refcnt),
69115 s->sk_type,
69116 ntohs(po->num),
69117 diff -urNp linux-2.6.32.42/net/phonet/af_phonet.c linux-2.6.32.42/net/phonet/af_phonet.c
69118 --- linux-2.6.32.42/net/phonet/af_phonet.c 2011-03-27 14:31:47.000000000 -0400
69119 +++ linux-2.6.32.42/net/phonet/af_phonet.c 2011-04-17 15:56:46.000000000 -0400
69120 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
69121 {
69122 struct phonet_protocol *pp;
69123
69124 - if (protocol >= PHONET_NPROTO)
69125 + if (protocol < 0 || protocol >= PHONET_NPROTO)
69126 return NULL;
69127
69128 spin_lock(&proto_tab_lock);
69129 @@ -402,7 +402,7 @@ int __init_or_module phonet_proto_regist
69130 {
69131 int err = 0;
69132
69133 - if (protocol >= PHONET_NPROTO)
69134 + if (protocol < 0 || protocol >= PHONET_NPROTO)
69135 return -EINVAL;
69136
69137 err = proto_register(pp->prot, 1);
69138 diff -urNp linux-2.6.32.42/net/phonet/datagram.c linux-2.6.32.42/net/phonet/datagram.c
69139 --- linux-2.6.32.42/net/phonet/datagram.c 2011-03-27 14:31:47.000000000 -0400
69140 +++ linux-2.6.32.42/net/phonet/datagram.c 2011-05-04 17:56:28.000000000 -0400
69141 @@ -162,7 +162,7 @@ static int pn_backlog_rcv(struct sock *s
69142 if (err < 0) {
69143 kfree_skb(skb);
69144 if (err == -ENOMEM)
69145 - atomic_inc(&sk->sk_drops);
69146 + atomic_inc_unchecked(&sk->sk_drops);
69147 }
69148 return err ? NET_RX_DROP : NET_RX_SUCCESS;
69149 }
69150 diff -urNp linux-2.6.32.42/net/phonet/pep.c linux-2.6.32.42/net/phonet/pep.c
69151 --- linux-2.6.32.42/net/phonet/pep.c 2011-03-27 14:31:47.000000000 -0400
69152 +++ linux-2.6.32.42/net/phonet/pep.c 2011-05-04 17:56:28.000000000 -0400
69153 @@ -348,7 +348,7 @@ static int pipe_do_rcv(struct sock *sk,
69154
69155 case PNS_PEP_CTRL_REQ:
69156 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
69157 - atomic_inc(&sk->sk_drops);
69158 + atomic_inc_unchecked(&sk->sk_drops);
69159 break;
69160 }
69161 __skb_pull(skb, 4);
69162 @@ -362,12 +362,12 @@ static int pipe_do_rcv(struct sock *sk,
69163 if (!err)
69164 return 0;
69165 if (err == -ENOMEM)
69166 - atomic_inc(&sk->sk_drops);
69167 + atomic_inc_unchecked(&sk->sk_drops);
69168 break;
69169 }
69170
69171 if (pn->rx_credits == 0) {
69172 - atomic_inc(&sk->sk_drops);
69173 + atomic_inc_unchecked(&sk->sk_drops);
69174 err = -ENOBUFS;
69175 break;
69176 }
69177 diff -urNp linux-2.6.32.42/net/phonet/socket.c linux-2.6.32.42/net/phonet/socket.c
69178 --- linux-2.6.32.42/net/phonet/socket.c 2011-03-27 14:31:47.000000000 -0400
69179 +++ linux-2.6.32.42/net/phonet/socket.c 2011-05-04 17:57:07.000000000 -0400
69180 @@ -482,8 +482,13 @@ static int pn_sock_seq_show(struct seq_f
69181 sk->sk_state,
69182 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
69183 sock_i_uid(sk), sock_i_ino(sk),
69184 - atomic_read(&sk->sk_refcnt), sk,
69185 - atomic_read(&sk->sk_drops), &len);
69186 + atomic_read(&sk->sk_refcnt),
69187 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69188 + NULL,
69189 +#else
69190 + sk,
69191 +#endif
69192 + atomic_read_unchecked(&sk->sk_drops), &len);
69193 }
69194 seq_printf(seq, "%*s\n", 127 - len, "");
69195 return 0;
69196 diff -urNp linux-2.6.32.42/net/rds/cong.c linux-2.6.32.42/net/rds/cong.c
69197 --- linux-2.6.32.42/net/rds/cong.c 2011-03-27 14:31:47.000000000 -0400
69198 +++ linux-2.6.32.42/net/rds/cong.c 2011-05-04 17:56:28.000000000 -0400
69199 @@ -77,7 +77,7 @@
69200 * finds that the saved generation number is smaller than the global generation
69201 * number, it wakes up the process.
69202 */
69203 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
69204 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
69205
69206 /*
69207 * Congestion monitoring
69208 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
69209 rdsdebug("waking map %p for %pI4\n",
69210 map, &map->m_addr);
69211 rds_stats_inc(s_cong_update_received);
69212 - atomic_inc(&rds_cong_generation);
69213 + atomic_inc_unchecked(&rds_cong_generation);
69214 if (waitqueue_active(&map->m_waitq))
69215 wake_up(&map->m_waitq);
69216 if (waitqueue_active(&rds_poll_waitq))
69217 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
69218
69219 int rds_cong_updated_since(unsigned long *recent)
69220 {
69221 - unsigned long gen = atomic_read(&rds_cong_generation);
69222 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
69223
69224 if (likely(*recent == gen))
69225 return 0;
69226 diff -urNp linux-2.6.32.42/net/rds/iw_rdma.c linux-2.6.32.42/net/rds/iw_rdma.c
69227 --- linux-2.6.32.42/net/rds/iw_rdma.c 2011-03-27 14:31:47.000000000 -0400
69228 +++ linux-2.6.32.42/net/rds/iw_rdma.c 2011-05-16 21:46:57.000000000 -0400
69229 @@ -181,6 +181,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
69230 struct rdma_cm_id *pcm_id;
69231 int rc;
69232
69233 + pax_track_stack();
69234 +
69235 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
69236 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
69237
69238 diff -urNp linux-2.6.32.42/net/rds/Kconfig linux-2.6.32.42/net/rds/Kconfig
69239 --- linux-2.6.32.42/net/rds/Kconfig 2011-03-27 14:31:47.000000000 -0400
69240 +++ linux-2.6.32.42/net/rds/Kconfig 2011-04-17 15:56:46.000000000 -0400
69241 @@ -1,7 +1,7 @@
69242
69243 config RDS
69244 tristate "The RDS Protocol (EXPERIMENTAL)"
69245 - depends on INET && EXPERIMENTAL
69246 + depends on INET && EXPERIMENTAL && BROKEN
69247 ---help---
69248 The RDS (Reliable Datagram Sockets) protocol provides reliable,
69249 sequenced delivery of datagrams over Infiniband, iWARP,
69250 diff -urNp linux-2.6.32.42/net/rxrpc/af_rxrpc.c linux-2.6.32.42/net/rxrpc/af_rxrpc.c
69251 --- linux-2.6.32.42/net/rxrpc/af_rxrpc.c 2011-03-27 14:31:47.000000000 -0400
69252 +++ linux-2.6.32.42/net/rxrpc/af_rxrpc.c 2011-05-04 17:56:28.000000000 -0400
69253 @@ -38,7 +38,7 @@ static const struct proto_ops rxrpc_rpc_
69254 __be32 rxrpc_epoch;
69255
69256 /* current debugging ID */
69257 -atomic_t rxrpc_debug_id;
69258 +atomic_unchecked_t rxrpc_debug_id;
69259
69260 /* count of skbs currently in use */
69261 atomic_t rxrpc_n_skbs;
69262 diff -urNp linux-2.6.32.42/net/rxrpc/ar-ack.c linux-2.6.32.42/net/rxrpc/ar-ack.c
69263 --- linux-2.6.32.42/net/rxrpc/ar-ack.c 2011-03-27 14:31:47.000000000 -0400
69264 +++ linux-2.6.32.42/net/rxrpc/ar-ack.c 2011-05-16 21:46:57.000000000 -0400
69265 @@ -174,7 +174,7 @@ static void rxrpc_resend(struct rxrpc_ca
69266
69267 _enter("{%d,%d,%d,%d},",
69268 call->acks_hard, call->acks_unacked,
69269 - atomic_read(&call->sequence),
69270 + atomic_read_unchecked(&call->sequence),
69271 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
69272
69273 stop = 0;
69274 @@ -198,7 +198,7 @@ static void rxrpc_resend(struct rxrpc_ca
69275
69276 /* each Tx packet has a new serial number */
69277 sp->hdr.serial =
69278 - htonl(atomic_inc_return(&call->conn->serial));
69279 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
69280
69281 hdr = (struct rxrpc_header *) txb->head;
69282 hdr->serial = sp->hdr.serial;
69283 @@ -401,7 +401,7 @@ static void rxrpc_rotate_tx_window(struc
69284 */
69285 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
69286 {
69287 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
69288 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
69289 }
69290
69291 /*
69292 @@ -627,7 +627,7 @@ process_further:
69293
69294 latest = ntohl(sp->hdr.serial);
69295 hard = ntohl(ack.firstPacket);
69296 - tx = atomic_read(&call->sequence);
69297 + tx = atomic_read_unchecked(&call->sequence);
69298
69299 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
69300 latest,
69301 @@ -840,6 +840,8 @@ void rxrpc_process_call(struct work_stru
69302 u32 abort_code = RX_PROTOCOL_ERROR;
69303 u8 *acks = NULL;
69304
69305 + pax_track_stack();
69306 +
69307 //printk("\n--------------------\n");
69308 _enter("{%d,%s,%lx} [%lu]",
69309 call->debug_id, rxrpc_call_states[call->state], call->events,
69310 @@ -1159,7 +1161,7 @@ void rxrpc_process_call(struct work_stru
69311 goto maybe_reschedule;
69312
69313 send_ACK_with_skew:
69314 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
69315 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
69316 ntohl(ack.serial));
69317 send_ACK:
69318 mtu = call->conn->trans->peer->if_mtu;
69319 @@ -1171,7 +1173,7 @@ send_ACK:
69320 ackinfo.rxMTU = htonl(5692);
69321 ackinfo.jumbo_max = htonl(4);
69322
69323 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
69324 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
69325 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
69326 ntohl(hdr.serial),
69327 ntohs(ack.maxSkew),
69328 @@ -1189,7 +1191,7 @@ send_ACK:
69329 send_message:
69330 _debug("send message");
69331
69332 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
69333 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
69334 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
69335 send_message_2:
69336
69337 diff -urNp linux-2.6.32.42/net/rxrpc/ar-call.c linux-2.6.32.42/net/rxrpc/ar-call.c
69338 --- linux-2.6.32.42/net/rxrpc/ar-call.c 2011-03-27 14:31:47.000000000 -0400
69339 +++ linux-2.6.32.42/net/rxrpc/ar-call.c 2011-05-04 17:56:28.000000000 -0400
69340 @@ -82,7 +82,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
69341 spin_lock_init(&call->lock);
69342 rwlock_init(&call->state_lock);
69343 atomic_set(&call->usage, 1);
69344 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
69345 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69346 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
69347
69348 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
69349 diff -urNp linux-2.6.32.42/net/rxrpc/ar-connection.c linux-2.6.32.42/net/rxrpc/ar-connection.c
69350 --- linux-2.6.32.42/net/rxrpc/ar-connection.c 2011-03-27 14:31:47.000000000 -0400
69351 +++ linux-2.6.32.42/net/rxrpc/ar-connection.c 2011-05-04 17:56:28.000000000 -0400
69352 @@ -205,7 +205,7 @@ static struct rxrpc_connection *rxrpc_al
69353 rwlock_init(&conn->lock);
69354 spin_lock_init(&conn->state_lock);
69355 atomic_set(&conn->usage, 1);
69356 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
69357 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69358 conn->avail_calls = RXRPC_MAXCALLS;
69359 conn->size_align = 4;
69360 conn->header_size = sizeof(struct rxrpc_header);
69361 diff -urNp linux-2.6.32.42/net/rxrpc/ar-connevent.c linux-2.6.32.42/net/rxrpc/ar-connevent.c
69362 --- linux-2.6.32.42/net/rxrpc/ar-connevent.c 2011-03-27 14:31:47.000000000 -0400
69363 +++ linux-2.6.32.42/net/rxrpc/ar-connevent.c 2011-05-04 17:56:28.000000000 -0400
69364 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
69365
69366 len = iov[0].iov_len + iov[1].iov_len;
69367
69368 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
69369 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69370 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
69371
69372 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
69373 diff -urNp linux-2.6.32.42/net/rxrpc/ar-input.c linux-2.6.32.42/net/rxrpc/ar-input.c
69374 --- linux-2.6.32.42/net/rxrpc/ar-input.c 2011-03-27 14:31:47.000000000 -0400
69375 +++ linux-2.6.32.42/net/rxrpc/ar-input.c 2011-05-04 17:56:28.000000000 -0400
69376 @@ -339,9 +339,9 @@ void rxrpc_fast_process_packet(struct rx
69377 /* track the latest serial number on this connection for ACK packet
69378 * information */
69379 serial = ntohl(sp->hdr.serial);
69380 - hi_serial = atomic_read(&call->conn->hi_serial);
69381 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
69382 while (serial > hi_serial)
69383 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
69384 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
69385 serial);
69386
69387 /* request ACK generation for any ACK or DATA packet that requests
69388 diff -urNp linux-2.6.32.42/net/rxrpc/ar-internal.h linux-2.6.32.42/net/rxrpc/ar-internal.h
69389 --- linux-2.6.32.42/net/rxrpc/ar-internal.h 2011-03-27 14:31:47.000000000 -0400
69390 +++ linux-2.6.32.42/net/rxrpc/ar-internal.h 2011-05-04 17:56:28.000000000 -0400
69391 @@ -272,8 +272,8 @@ struct rxrpc_connection {
69392 int error; /* error code for local abort */
69393 int debug_id; /* debug ID for printks */
69394 unsigned call_counter; /* call ID counter */
69395 - atomic_t serial; /* packet serial number counter */
69396 - atomic_t hi_serial; /* highest serial number received */
69397 + atomic_unchecked_t serial; /* packet serial number counter */
69398 + atomic_unchecked_t hi_serial; /* highest serial number received */
69399 u8 avail_calls; /* number of calls available */
69400 u8 size_align; /* data size alignment (for security) */
69401 u8 header_size; /* rxrpc + security header size */
69402 @@ -346,7 +346,7 @@ struct rxrpc_call {
69403 spinlock_t lock;
69404 rwlock_t state_lock; /* lock for state transition */
69405 atomic_t usage;
69406 - atomic_t sequence; /* Tx data packet sequence counter */
69407 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
69408 u32 abort_code; /* local/remote abort code */
69409 enum { /* current state of call */
69410 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
69411 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
69412 */
69413 extern atomic_t rxrpc_n_skbs;
69414 extern __be32 rxrpc_epoch;
69415 -extern atomic_t rxrpc_debug_id;
69416 +extern atomic_unchecked_t rxrpc_debug_id;
69417 extern struct workqueue_struct *rxrpc_workqueue;
69418
69419 /*
69420 diff -urNp linux-2.6.32.42/net/rxrpc/ar-key.c linux-2.6.32.42/net/rxrpc/ar-key.c
69421 --- linux-2.6.32.42/net/rxrpc/ar-key.c 2011-03-27 14:31:47.000000000 -0400
69422 +++ linux-2.6.32.42/net/rxrpc/ar-key.c 2011-04-17 15:56:46.000000000 -0400
69423 @@ -88,11 +88,11 @@ static int rxrpc_instantiate_xdr_rxkad(s
69424 return ret;
69425
69426 plen -= sizeof(*token);
69427 - token = kmalloc(sizeof(*token), GFP_KERNEL);
69428 + token = kzalloc(sizeof(*token), GFP_KERNEL);
69429 if (!token)
69430 return -ENOMEM;
69431
69432 - token->kad = kmalloc(plen, GFP_KERNEL);
69433 + token->kad = kzalloc(plen, GFP_KERNEL);
69434 if (!token->kad) {
69435 kfree(token);
69436 return -ENOMEM;
69437 @@ -730,10 +730,10 @@ static int rxrpc_instantiate(struct key
69438 goto error;
69439
69440 ret = -ENOMEM;
69441 - token = kmalloc(sizeof(*token), GFP_KERNEL);
69442 + token = kzalloc(sizeof(*token), GFP_KERNEL);
69443 if (!token)
69444 goto error;
69445 - token->kad = kmalloc(plen, GFP_KERNEL);
69446 + token->kad = kzalloc(plen, GFP_KERNEL);
69447 if (!token->kad)
69448 goto error_free;
69449
69450 diff -urNp linux-2.6.32.42/net/rxrpc/ar-local.c linux-2.6.32.42/net/rxrpc/ar-local.c
69451 --- linux-2.6.32.42/net/rxrpc/ar-local.c 2011-03-27 14:31:47.000000000 -0400
69452 +++ linux-2.6.32.42/net/rxrpc/ar-local.c 2011-05-04 17:56:28.000000000 -0400
69453 @@ -44,7 +44,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
69454 spin_lock_init(&local->lock);
69455 rwlock_init(&local->services_lock);
69456 atomic_set(&local->usage, 1);
69457 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
69458 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69459 memcpy(&local->srx, srx, sizeof(*srx));
69460 }
69461
69462 diff -urNp linux-2.6.32.42/net/rxrpc/ar-output.c linux-2.6.32.42/net/rxrpc/ar-output.c
69463 --- linux-2.6.32.42/net/rxrpc/ar-output.c 2011-03-27 14:31:47.000000000 -0400
69464 +++ linux-2.6.32.42/net/rxrpc/ar-output.c 2011-05-04 17:56:28.000000000 -0400
69465 @@ -680,9 +680,9 @@ static int rxrpc_send_data(struct kiocb
69466 sp->hdr.cid = call->cid;
69467 sp->hdr.callNumber = call->call_id;
69468 sp->hdr.seq =
69469 - htonl(atomic_inc_return(&call->sequence));
69470 + htonl(atomic_inc_return_unchecked(&call->sequence));
69471 sp->hdr.serial =
69472 - htonl(atomic_inc_return(&conn->serial));
69473 + htonl(atomic_inc_return_unchecked(&conn->serial));
69474 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
69475 sp->hdr.userStatus = 0;
69476 sp->hdr.securityIndex = conn->security_ix;
69477 diff -urNp linux-2.6.32.42/net/rxrpc/ar-peer.c linux-2.6.32.42/net/rxrpc/ar-peer.c
69478 --- linux-2.6.32.42/net/rxrpc/ar-peer.c 2011-03-27 14:31:47.000000000 -0400
69479 +++ linux-2.6.32.42/net/rxrpc/ar-peer.c 2011-05-04 17:56:28.000000000 -0400
69480 @@ -86,7 +86,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
69481 INIT_LIST_HEAD(&peer->error_targets);
69482 spin_lock_init(&peer->lock);
69483 atomic_set(&peer->usage, 1);
69484 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
69485 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69486 memcpy(&peer->srx, srx, sizeof(*srx));
69487
69488 rxrpc_assess_MTU_size(peer);
69489 diff -urNp linux-2.6.32.42/net/rxrpc/ar-proc.c linux-2.6.32.42/net/rxrpc/ar-proc.c
69490 --- linux-2.6.32.42/net/rxrpc/ar-proc.c 2011-03-27 14:31:47.000000000 -0400
69491 +++ linux-2.6.32.42/net/rxrpc/ar-proc.c 2011-05-04 17:56:28.000000000 -0400
69492 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
69493 atomic_read(&conn->usage),
69494 rxrpc_conn_states[conn->state],
69495 key_serial(conn->key),
69496 - atomic_read(&conn->serial),
69497 - atomic_read(&conn->hi_serial));
69498 + atomic_read_unchecked(&conn->serial),
69499 + atomic_read_unchecked(&conn->hi_serial));
69500
69501 return 0;
69502 }
69503 diff -urNp linux-2.6.32.42/net/rxrpc/ar-transport.c linux-2.6.32.42/net/rxrpc/ar-transport.c
69504 --- linux-2.6.32.42/net/rxrpc/ar-transport.c 2011-03-27 14:31:47.000000000 -0400
69505 +++ linux-2.6.32.42/net/rxrpc/ar-transport.c 2011-05-04 17:56:28.000000000 -0400
69506 @@ -46,7 +46,7 @@ static struct rxrpc_transport *rxrpc_all
69507 spin_lock_init(&trans->client_lock);
69508 rwlock_init(&trans->conn_lock);
69509 atomic_set(&trans->usage, 1);
69510 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
69511 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
69512
69513 if (peer->srx.transport.family == AF_INET) {
69514 switch (peer->srx.transport_type) {
69515 diff -urNp linux-2.6.32.42/net/rxrpc/rxkad.c linux-2.6.32.42/net/rxrpc/rxkad.c
69516 --- linux-2.6.32.42/net/rxrpc/rxkad.c 2011-03-27 14:31:47.000000000 -0400
69517 +++ linux-2.6.32.42/net/rxrpc/rxkad.c 2011-05-16 21:46:57.000000000 -0400
69518 @@ -210,6 +210,8 @@ static int rxkad_secure_packet_encrypt(c
69519 u16 check;
69520 int nsg;
69521
69522 + pax_track_stack();
69523 +
69524 sp = rxrpc_skb(skb);
69525
69526 _enter("");
69527 @@ -337,6 +339,8 @@ static int rxkad_verify_packet_auth(cons
69528 u16 check;
69529 int nsg;
69530
69531 + pax_track_stack();
69532 +
69533 _enter("");
69534
69535 sp = rxrpc_skb(skb);
69536 @@ -609,7 +613,7 @@ static int rxkad_issue_challenge(struct
69537
69538 len = iov[0].iov_len + iov[1].iov_len;
69539
69540 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
69541 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69542 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
69543
69544 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
69545 @@ -659,7 +663,7 @@ static int rxkad_send_response(struct rx
69546
69547 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
69548
69549 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
69550 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
69551 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
69552
69553 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
69554 diff -urNp linux-2.6.32.42/net/sctp/proc.c linux-2.6.32.42/net/sctp/proc.c
69555 --- linux-2.6.32.42/net/sctp/proc.c 2011-03-27 14:31:47.000000000 -0400
69556 +++ linux-2.6.32.42/net/sctp/proc.c 2011-04-17 15:56:46.000000000 -0400
69557 @@ -213,7 +213,12 @@ static int sctp_eps_seq_show(struct seq_
69558 sctp_for_each_hentry(epb, node, &head->chain) {
69559 ep = sctp_ep(epb);
69560 sk = epb->sk;
69561 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
69562 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
69563 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69564 + NULL, NULL,
69565 +#else
69566 + ep, sk,
69567 +#endif
69568 sctp_sk(sk)->type, sk->sk_state, hash,
69569 epb->bind_addr.port,
69570 sock_i_uid(sk), sock_i_ino(sk));
69571 @@ -320,7 +325,12 @@ static int sctp_assocs_seq_show(struct s
69572 seq_printf(seq,
69573 "%8p %8p %-3d %-3d %-2d %-4d "
69574 "%4d %8d %8d %7d %5lu %-5d %5d ",
69575 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
69576 +#ifdef CONFIG_GRKERNSEC_HIDESYM
69577 + NULL, NULL,
69578 +#else
69579 + assoc, sk,
69580 +#endif
69581 + sctp_sk(sk)->type, sk->sk_state,
69582 assoc->state, hash,
69583 assoc->assoc_id,
69584 assoc->sndbuf_used,
69585 diff -urNp linux-2.6.32.42/net/sctp/socket.c linux-2.6.32.42/net/sctp/socket.c
69586 --- linux-2.6.32.42/net/sctp/socket.c 2011-03-27 14:31:47.000000000 -0400
69587 +++ linux-2.6.32.42/net/sctp/socket.c 2011-04-23 12:56:11.000000000 -0400
69588 @@ -5802,7 +5802,6 @@ pp_found:
69589 */
69590 int reuse = sk->sk_reuse;
69591 struct sock *sk2;
69592 - struct hlist_node *node;
69593
69594 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
69595 if (pp->fastreuse && sk->sk_reuse &&
69596 diff -urNp linux-2.6.32.42/net/socket.c linux-2.6.32.42/net/socket.c
69597 --- linux-2.6.32.42/net/socket.c 2011-03-27 14:31:47.000000000 -0400
69598 +++ linux-2.6.32.42/net/socket.c 2011-05-16 21:46:57.000000000 -0400
69599 @@ -87,6 +87,7 @@
69600 #include <linux/wireless.h>
69601 #include <linux/nsproxy.h>
69602 #include <linux/magic.h>
69603 +#include <linux/in.h>
69604
69605 #include <asm/uaccess.h>
69606 #include <asm/unistd.h>
69607 @@ -97,6 +98,21 @@
69608 #include <net/sock.h>
69609 #include <linux/netfilter.h>
69610
69611 +extern void gr_attach_curr_ip(const struct sock *sk);
69612 +extern int gr_handle_sock_all(const int family, const int type,
69613 + const int protocol);
69614 +extern int gr_handle_sock_server(const struct sockaddr *sck);
69615 +extern int gr_handle_sock_server_other(const struct sock *sck);
69616 +extern int gr_handle_sock_client(const struct sockaddr *sck);
69617 +extern int gr_search_connect(struct socket * sock,
69618 + struct sockaddr_in * addr);
69619 +extern int gr_search_bind(struct socket * sock,
69620 + struct sockaddr_in * addr);
69621 +extern int gr_search_listen(struct socket * sock);
69622 +extern int gr_search_accept(struct socket * sock);
69623 +extern int gr_search_socket(const int domain, const int type,
69624 + const int protocol);
69625 +
69626 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
69627 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
69628 unsigned long nr_segs, loff_t pos);
69629 @@ -298,7 +314,7 @@ static int sockfs_get_sb(struct file_sys
69630 mnt);
69631 }
69632
69633 -static struct vfsmount *sock_mnt __read_mostly;
69634 +struct vfsmount *sock_mnt __read_mostly;
69635
69636 static struct file_system_type sock_fs_type = {
69637 .name = "sockfs",
69638 @@ -1154,6 +1170,8 @@ static int __sock_create(struct net *net
69639 return -EAFNOSUPPORT;
69640 if (type < 0 || type >= SOCK_MAX)
69641 return -EINVAL;
69642 + if (protocol < 0)
69643 + return -EINVAL;
69644
69645 /* Compatibility.
69646
69647 @@ -1283,6 +1301,16 @@ SYSCALL_DEFINE3(socket, int, family, int
69648 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
69649 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
69650
69651 + if(!gr_search_socket(family, type, protocol)) {
69652 + retval = -EACCES;
69653 + goto out;
69654 + }
69655 +
69656 + if (gr_handle_sock_all(family, type, protocol)) {
69657 + retval = -EACCES;
69658 + goto out;
69659 + }
69660 +
69661 retval = sock_create(family, type, protocol, &sock);
69662 if (retval < 0)
69663 goto out;
69664 @@ -1415,6 +1443,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
69665 if (sock) {
69666 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
69667 if (err >= 0) {
69668 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
69669 + err = -EACCES;
69670 + goto error;
69671 + }
69672 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
69673 + if (err)
69674 + goto error;
69675 +
69676 err = security_socket_bind(sock,
69677 (struct sockaddr *)&address,
69678 addrlen);
69679 @@ -1423,6 +1459,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
69680 (struct sockaddr *)
69681 &address, addrlen);
69682 }
69683 +error:
69684 fput_light(sock->file, fput_needed);
69685 }
69686 return err;
69687 @@ -1446,10 +1483,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
69688 if ((unsigned)backlog > somaxconn)
69689 backlog = somaxconn;
69690
69691 + if (gr_handle_sock_server_other(sock->sk)) {
69692 + err = -EPERM;
69693 + goto error;
69694 + }
69695 +
69696 + err = gr_search_listen(sock);
69697 + if (err)
69698 + goto error;
69699 +
69700 err = security_socket_listen(sock, backlog);
69701 if (!err)
69702 err = sock->ops->listen(sock, backlog);
69703
69704 +error:
69705 fput_light(sock->file, fput_needed);
69706 }
69707 return err;
69708 @@ -1492,6 +1539,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
69709 newsock->type = sock->type;
69710 newsock->ops = sock->ops;
69711
69712 + if (gr_handle_sock_server_other(sock->sk)) {
69713 + err = -EPERM;
69714 + sock_release(newsock);
69715 + goto out_put;
69716 + }
69717 +
69718 + err = gr_search_accept(sock);
69719 + if (err) {
69720 + sock_release(newsock);
69721 + goto out_put;
69722 + }
69723 +
69724 /*
69725 * We don't need try_module_get here, as the listening socket (sock)
69726 * has the protocol module (sock->ops->owner) held.
69727 @@ -1534,6 +1593,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
69728 fd_install(newfd, newfile);
69729 err = newfd;
69730
69731 + gr_attach_curr_ip(newsock->sk);
69732 +
69733 out_put:
69734 fput_light(sock->file, fput_needed);
69735 out:
69736 @@ -1571,6 +1632,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
69737 int, addrlen)
69738 {
69739 struct socket *sock;
69740 + struct sockaddr *sck;
69741 struct sockaddr_storage address;
69742 int err, fput_needed;
69743
69744 @@ -1581,6 +1643,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
69745 if (err < 0)
69746 goto out_put;
69747
69748 + sck = (struct sockaddr *)&address;
69749 +
69750 + if (gr_handle_sock_client(sck)) {
69751 + err = -EACCES;
69752 + goto out_put;
69753 + }
69754 +
69755 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
69756 + if (err)
69757 + goto out_put;
69758 +
69759 err =
69760 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
69761 if (err)
69762 @@ -1882,6 +1955,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
69763 int err, ctl_len, iov_size, total_len;
69764 int fput_needed;
69765
69766 + pax_track_stack();
69767 +
69768 err = -EFAULT;
69769 if (MSG_CMSG_COMPAT & flags) {
69770 if (get_compat_msghdr(&msg_sys, msg_compat))
69771 diff -urNp linux-2.6.32.42/net/sunrpc/sched.c linux-2.6.32.42/net/sunrpc/sched.c
69772 --- linux-2.6.32.42/net/sunrpc/sched.c 2011-03-27 14:31:47.000000000 -0400
69773 +++ linux-2.6.32.42/net/sunrpc/sched.c 2011-04-17 15:56:46.000000000 -0400
69774 @@ -234,10 +234,10 @@ static int rpc_wait_bit_killable(void *w
69775 #ifdef RPC_DEBUG
69776 static void rpc_task_set_debuginfo(struct rpc_task *task)
69777 {
69778 - static atomic_t rpc_pid;
69779 + static atomic_unchecked_t rpc_pid;
69780
69781 task->tk_magic = RPC_TASK_MAGIC_ID;
69782 - task->tk_pid = atomic_inc_return(&rpc_pid);
69783 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
69784 }
69785 #else
69786 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
69787 diff -urNp linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma.c
69788 --- linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma.c 2011-03-27 14:31:47.000000000 -0400
69789 +++ linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-04 17:56:20.000000000 -0400
69790 @@ -59,15 +59,15 @@ unsigned int svcrdma_max_req_size = RPCR
69791 static unsigned int min_max_inline = 4096;
69792 static unsigned int max_max_inline = 65536;
69793
69794 -atomic_t rdma_stat_recv;
69795 -atomic_t rdma_stat_read;
69796 -atomic_t rdma_stat_write;
69797 -atomic_t rdma_stat_sq_starve;
69798 -atomic_t rdma_stat_rq_starve;
69799 -atomic_t rdma_stat_rq_poll;
69800 -atomic_t rdma_stat_rq_prod;
69801 -atomic_t rdma_stat_sq_poll;
69802 -atomic_t rdma_stat_sq_prod;
69803 +atomic_unchecked_t rdma_stat_recv;
69804 +atomic_unchecked_t rdma_stat_read;
69805 +atomic_unchecked_t rdma_stat_write;
69806 +atomic_unchecked_t rdma_stat_sq_starve;
69807 +atomic_unchecked_t rdma_stat_rq_starve;
69808 +atomic_unchecked_t rdma_stat_rq_poll;
69809 +atomic_unchecked_t rdma_stat_rq_prod;
69810 +atomic_unchecked_t rdma_stat_sq_poll;
69811 +atomic_unchecked_t rdma_stat_sq_prod;
69812
69813 /* Temporary NFS request map and context caches */
69814 struct kmem_cache *svc_rdma_map_cachep;
69815 @@ -105,7 +105,7 @@ static int read_reset_stat(ctl_table *ta
69816 len -= *ppos;
69817 if (len > *lenp)
69818 len = *lenp;
69819 - if (len && copy_to_user(buffer, str_buf, len))
69820 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
69821 return -EFAULT;
69822 *lenp = len;
69823 *ppos += len;
69824 @@ -149,63 +149,63 @@ static ctl_table svcrdma_parm_table[] =
69825 {
69826 .procname = "rdma_stat_read",
69827 .data = &rdma_stat_read,
69828 - .maxlen = sizeof(atomic_t),
69829 + .maxlen = sizeof(atomic_unchecked_t),
69830 .mode = 0644,
69831 .proc_handler = &read_reset_stat,
69832 },
69833 {
69834 .procname = "rdma_stat_recv",
69835 .data = &rdma_stat_recv,
69836 - .maxlen = sizeof(atomic_t),
69837 + .maxlen = sizeof(atomic_unchecked_t),
69838 .mode = 0644,
69839 .proc_handler = &read_reset_stat,
69840 },
69841 {
69842 .procname = "rdma_stat_write",
69843 .data = &rdma_stat_write,
69844 - .maxlen = sizeof(atomic_t),
69845 + .maxlen = sizeof(atomic_unchecked_t),
69846 .mode = 0644,
69847 .proc_handler = &read_reset_stat,
69848 },
69849 {
69850 .procname = "rdma_stat_sq_starve",
69851 .data = &rdma_stat_sq_starve,
69852 - .maxlen = sizeof(atomic_t),
69853 + .maxlen = sizeof(atomic_unchecked_t),
69854 .mode = 0644,
69855 .proc_handler = &read_reset_stat,
69856 },
69857 {
69858 .procname = "rdma_stat_rq_starve",
69859 .data = &rdma_stat_rq_starve,
69860 - .maxlen = sizeof(atomic_t),
69861 + .maxlen = sizeof(atomic_unchecked_t),
69862 .mode = 0644,
69863 .proc_handler = &read_reset_stat,
69864 },
69865 {
69866 .procname = "rdma_stat_rq_poll",
69867 .data = &rdma_stat_rq_poll,
69868 - .maxlen = sizeof(atomic_t),
69869 + .maxlen = sizeof(atomic_unchecked_t),
69870 .mode = 0644,
69871 .proc_handler = &read_reset_stat,
69872 },
69873 {
69874 .procname = "rdma_stat_rq_prod",
69875 .data = &rdma_stat_rq_prod,
69876 - .maxlen = sizeof(atomic_t),
69877 + .maxlen = sizeof(atomic_unchecked_t),
69878 .mode = 0644,
69879 .proc_handler = &read_reset_stat,
69880 },
69881 {
69882 .procname = "rdma_stat_sq_poll",
69883 .data = &rdma_stat_sq_poll,
69884 - .maxlen = sizeof(atomic_t),
69885 + .maxlen = sizeof(atomic_unchecked_t),
69886 .mode = 0644,
69887 .proc_handler = &read_reset_stat,
69888 },
69889 {
69890 .procname = "rdma_stat_sq_prod",
69891 .data = &rdma_stat_sq_prod,
69892 - .maxlen = sizeof(atomic_t),
69893 + .maxlen = sizeof(atomic_unchecked_t),
69894 .mode = 0644,
69895 .proc_handler = &read_reset_stat,
69896 },
69897 diff -urNp linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
69898 --- linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-03-27 14:31:47.000000000 -0400
69899 +++ linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-04 17:56:28.000000000 -0400
69900 @@ -495,7 +495,7 @@ next_sge:
69901 svc_rdma_put_context(ctxt, 0);
69902 goto out;
69903 }
69904 - atomic_inc(&rdma_stat_read);
69905 + atomic_inc_unchecked(&rdma_stat_read);
69906
69907 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
69908 chl_map->ch[ch_no].count -= read_wr.num_sge;
69909 @@ -606,7 +606,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
69910 dto_q);
69911 list_del_init(&ctxt->dto_q);
69912 } else {
69913 - atomic_inc(&rdma_stat_rq_starve);
69914 + atomic_inc_unchecked(&rdma_stat_rq_starve);
69915 clear_bit(XPT_DATA, &xprt->xpt_flags);
69916 ctxt = NULL;
69917 }
69918 @@ -626,7 +626,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
69919 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
69920 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
69921 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
69922 - atomic_inc(&rdma_stat_recv);
69923 + atomic_inc_unchecked(&rdma_stat_recv);
69924
69925 /* Build up the XDR from the receive buffers. */
69926 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
69927 diff -urNp linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_sendto.c
69928 --- linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-03-27 14:31:47.000000000 -0400
69929 +++ linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-04 17:56:28.000000000 -0400
69930 @@ -328,7 +328,7 @@ static int send_write(struct svcxprt_rdm
69931 write_wr.wr.rdma.remote_addr = to;
69932
69933 /* Post It */
69934 - atomic_inc(&rdma_stat_write);
69935 + atomic_inc_unchecked(&rdma_stat_write);
69936 if (svc_rdma_send(xprt, &write_wr))
69937 goto err;
69938 return 0;
69939 diff -urNp linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_transport.c
69940 --- linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-03-27 14:31:47.000000000 -0400
69941 +++ linux-2.6.32.42/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-04 17:56:28.000000000 -0400
69942 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rd
69943 return;
69944
69945 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
69946 - atomic_inc(&rdma_stat_rq_poll);
69947 + atomic_inc_unchecked(&rdma_stat_rq_poll);
69948
69949 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
69950 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
69951 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rd
69952 }
69953
69954 if (ctxt)
69955 - atomic_inc(&rdma_stat_rq_prod);
69956 + atomic_inc_unchecked(&rdma_stat_rq_prod);
69957
69958 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
69959 /*
69960 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rd
69961 return;
69962
69963 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
69964 - atomic_inc(&rdma_stat_sq_poll);
69965 + atomic_inc_unchecked(&rdma_stat_sq_poll);
69966 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
69967 if (wc.status != IB_WC_SUCCESS)
69968 /* Close the transport */
69969 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rd
69970 }
69971
69972 if (ctxt)
69973 - atomic_inc(&rdma_stat_sq_prod);
69974 + atomic_inc_unchecked(&rdma_stat_sq_prod);
69975 }
69976
69977 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
69978 @@ -1260,7 +1260,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
69979 spin_lock_bh(&xprt->sc_lock);
69980 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
69981 spin_unlock_bh(&xprt->sc_lock);
69982 - atomic_inc(&rdma_stat_sq_starve);
69983 + atomic_inc_unchecked(&rdma_stat_sq_starve);
69984
69985 /* See if we can opportunistically reap SQ WR to make room */
69986 sq_cq_reap(xprt);
69987 diff -urNp linux-2.6.32.42/net/sysctl_net.c linux-2.6.32.42/net/sysctl_net.c
69988 --- linux-2.6.32.42/net/sysctl_net.c 2011-03-27 14:31:47.000000000 -0400
69989 +++ linux-2.6.32.42/net/sysctl_net.c 2011-04-17 15:56:46.000000000 -0400
69990 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
69991 struct ctl_table *table)
69992 {
69993 /* Allow network administrator to have same access as root. */
69994 - if (capable(CAP_NET_ADMIN)) {
69995 + if (capable_nolog(CAP_NET_ADMIN)) {
69996 int mode = (table->mode >> 6) & 7;
69997 return (mode << 6) | (mode << 3) | mode;
69998 }
69999 diff -urNp linux-2.6.32.42/net/unix/af_unix.c linux-2.6.32.42/net/unix/af_unix.c
70000 --- linux-2.6.32.42/net/unix/af_unix.c 2011-05-10 22:12:02.000000000 -0400
70001 +++ linux-2.6.32.42/net/unix/af_unix.c 2011-05-10 22:12:34.000000000 -0400
70002 @@ -745,6 +745,12 @@ static struct sock *unix_find_other(stru
70003 err = -ECONNREFUSED;
70004 if (!S_ISSOCK(inode->i_mode))
70005 goto put_fail;
70006 +
70007 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
70008 + err = -EACCES;
70009 + goto put_fail;
70010 + }
70011 +
70012 u = unix_find_socket_byinode(net, inode);
70013 if (!u)
70014 goto put_fail;
70015 @@ -765,6 +771,13 @@ static struct sock *unix_find_other(stru
70016 if (u) {
70017 struct dentry *dentry;
70018 dentry = unix_sk(u)->dentry;
70019 +
70020 + if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
70021 + err = -EPERM;
70022 + sock_put(u);
70023 + goto fail;
70024 + }
70025 +
70026 if (dentry)
70027 touch_atime(unix_sk(u)->mnt, dentry);
70028 } else
70029 @@ -850,11 +863,18 @@ static int unix_bind(struct socket *sock
70030 err = security_path_mknod(&nd.path, dentry, mode, 0);
70031 if (err)
70032 goto out_mknod_drop_write;
70033 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
70034 + err = -EACCES;
70035 + goto out_mknod_drop_write;
70036 + }
70037 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
70038 out_mknod_drop_write:
70039 mnt_drop_write(nd.path.mnt);
70040 if (err)
70041 goto out_mknod_dput;
70042 +
70043 + gr_handle_create(dentry, nd.path.mnt);
70044 +
70045 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
70046 dput(nd.path.dentry);
70047 nd.path.dentry = dentry;
70048 @@ -872,6 +892,10 @@ out_mknod_drop_write:
70049 goto out_unlock;
70050 }
70051
70052 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
70053 + sk->sk_peercred.pid = current->pid;
70054 +#endif
70055 +
70056 list = &unix_socket_table[addr->hash];
70057 } else {
70058 list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
70059 @@ -2211,7 +2235,11 @@ static int unix_seq_show(struct seq_file
70060 unix_state_lock(s);
70061
70062 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
70063 +#ifdef CONFIG_GRKERNSEC_HIDESYM
70064 + NULL,
70065 +#else
70066 s,
70067 +#endif
70068 atomic_read(&s->sk_refcnt),
70069 0,
70070 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
70071 diff -urNp linux-2.6.32.42/net/wireless/wext.c linux-2.6.32.42/net/wireless/wext.c
70072 --- linux-2.6.32.42/net/wireless/wext.c 2011-03-27 14:31:47.000000000 -0400
70073 +++ linux-2.6.32.42/net/wireless/wext.c 2011-04-17 15:56:46.000000000 -0400
70074 @@ -816,8 +816,7 @@ static int ioctl_standard_iw_point(struc
70075 */
70076
70077 /* Support for very large requests */
70078 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
70079 - (user_length > descr->max_tokens)) {
70080 + if (user_length > descr->max_tokens) {
70081 /* Allow userspace to GET more than max so
70082 * we can support any size GET requests.
70083 * There is still a limit : -ENOMEM.
70084 @@ -854,22 +853,6 @@ static int ioctl_standard_iw_point(struc
70085 }
70086 }
70087
70088 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
70089 - /*
70090 - * If this is a GET, but not NOMAX, it means that the extra
70091 - * data is not bounded by userspace, but by max_tokens. Thus
70092 - * set the length to max_tokens. This matches the extra data
70093 - * allocation.
70094 - * The driver should fill it with the number of tokens it
70095 - * provided, and it may check iwp->length rather than having
70096 - * knowledge of max_tokens. If the driver doesn't change the
70097 - * iwp->length, this ioctl just copies back max_token tokens
70098 - * filled with zeroes. Hopefully the driver isn't claiming
70099 - * them to be valid data.
70100 - */
70101 - iwp->length = descr->max_tokens;
70102 - }
70103 -
70104 err = handler(dev, info, (union iwreq_data *) iwp, extra);
70105
70106 iwp->length += essid_compat;
70107 diff -urNp linux-2.6.32.42/net/xfrm/xfrm_policy.c linux-2.6.32.42/net/xfrm/xfrm_policy.c
70108 --- linux-2.6.32.42/net/xfrm/xfrm_policy.c 2011-03-27 14:31:47.000000000 -0400
70109 +++ linux-2.6.32.42/net/xfrm/xfrm_policy.c 2011-05-04 17:56:20.000000000 -0400
70110 @@ -586,7 +586,7 @@ int xfrm_policy_insert(int dir, struct x
70111 hlist_add_head(&policy->bydst, chain);
70112 xfrm_pol_hold(policy);
70113 net->xfrm.policy_count[dir]++;
70114 - atomic_inc(&flow_cache_genid);
70115 + atomic_inc_unchecked(&flow_cache_genid);
70116 if (delpol)
70117 __xfrm_policy_unlink(delpol, dir);
70118 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
70119 @@ -669,7 +669,7 @@ struct xfrm_policy *xfrm_policy_bysel_ct
70120 write_unlock_bh(&xfrm_policy_lock);
70121
70122 if (ret && delete) {
70123 - atomic_inc(&flow_cache_genid);
70124 + atomic_inc_unchecked(&flow_cache_genid);
70125 xfrm_policy_kill(ret);
70126 }
70127 return ret;
70128 @@ -710,7 +710,7 @@ struct xfrm_policy *xfrm_policy_byid(str
70129 write_unlock_bh(&xfrm_policy_lock);
70130
70131 if (ret && delete) {
70132 - atomic_inc(&flow_cache_genid);
70133 + atomic_inc_unchecked(&flow_cache_genid);
70134 xfrm_policy_kill(ret);
70135 }
70136 return ret;
70137 @@ -824,7 +824,7 @@ int xfrm_policy_flush(struct net *net, u
70138 }
70139
70140 }
70141 - atomic_inc(&flow_cache_genid);
70142 + atomic_inc_unchecked(&flow_cache_genid);
70143 out:
70144 write_unlock_bh(&xfrm_policy_lock);
70145 return err;
70146 @@ -1088,7 +1088,7 @@ int xfrm_policy_delete(struct xfrm_polic
70147 write_unlock_bh(&xfrm_policy_lock);
70148 if (pol) {
70149 if (dir < XFRM_POLICY_MAX)
70150 - atomic_inc(&flow_cache_genid);
70151 + atomic_inc_unchecked(&flow_cache_genid);
70152 xfrm_policy_kill(pol);
70153 return 0;
70154 }
70155 @@ -1477,7 +1477,7 @@ free_dst:
70156 goto out;
70157 }
70158
70159 -static int inline
70160 +static inline int
70161 xfrm_dst_alloc_copy(void **target, void *src, int size)
70162 {
70163 if (!*target) {
70164 @@ -1489,7 +1489,7 @@ xfrm_dst_alloc_copy(void **target, void
70165 return 0;
70166 }
70167
70168 -static int inline
70169 +static inline int
70170 xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel)
70171 {
70172 #ifdef CONFIG_XFRM_SUB_POLICY
70173 @@ -1501,7 +1501,7 @@ xfrm_dst_update_parent(struct dst_entry
70174 #endif
70175 }
70176
70177 -static int inline
70178 +static inline int
70179 xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
70180 {
70181 #ifdef CONFIG_XFRM_SUB_POLICY
70182 @@ -1537,7 +1537,7 @@ int __xfrm_lookup(struct net *net, struc
70183 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
70184
70185 restart:
70186 - genid = atomic_read(&flow_cache_genid);
70187 + genid = atomic_read_unchecked(&flow_cache_genid);
70188 policy = NULL;
70189 for (pi = 0; pi < ARRAY_SIZE(pols); pi++)
70190 pols[pi] = NULL;
70191 @@ -1680,7 +1680,7 @@ restart:
70192 goto error;
70193 }
70194 if (nx == -EAGAIN ||
70195 - genid != atomic_read(&flow_cache_genid)) {
70196 + genid != atomic_read_unchecked(&flow_cache_genid)) {
70197 xfrm_pols_put(pols, npols);
70198 goto restart;
70199 }
70200 diff -urNp linux-2.6.32.42/net/xfrm/xfrm_user.c linux-2.6.32.42/net/xfrm/xfrm_user.c
70201 --- linux-2.6.32.42/net/xfrm/xfrm_user.c 2011-03-27 14:31:47.000000000 -0400
70202 +++ linux-2.6.32.42/net/xfrm/xfrm_user.c 2011-05-16 21:46:57.000000000 -0400
70203 @@ -1169,6 +1169,8 @@ static int copy_to_user_tmpl(struct xfrm
70204 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
70205 int i;
70206
70207 + pax_track_stack();
70208 +
70209 if (xp->xfrm_nr == 0)
70210 return 0;
70211
70212 @@ -1784,6 +1786,8 @@ static int xfrm_do_migrate(struct sk_buf
70213 int err;
70214 int n = 0;
70215
70216 + pax_track_stack();
70217 +
70218 if (attrs[XFRMA_MIGRATE] == NULL)
70219 return -EINVAL;
70220
70221 diff -urNp linux-2.6.32.42/samples/kobject/kset-example.c linux-2.6.32.42/samples/kobject/kset-example.c
70222 --- linux-2.6.32.42/samples/kobject/kset-example.c 2011-03-27 14:31:47.000000000 -0400
70223 +++ linux-2.6.32.42/samples/kobject/kset-example.c 2011-04-17 15:56:46.000000000 -0400
70224 @@ -87,7 +87,7 @@ static ssize_t foo_attr_store(struct kob
70225 }
70226
70227 /* Our custom sysfs_ops that we will associate with our ktype later on */
70228 -static struct sysfs_ops foo_sysfs_ops = {
70229 +static const struct sysfs_ops foo_sysfs_ops = {
70230 .show = foo_attr_show,
70231 .store = foo_attr_store,
70232 };
70233 diff -urNp linux-2.6.32.42/scripts/basic/fixdep.c linux-2.6.32.42/scripts/basic/fixdep.c
70234 --- linux-2.6.32.42/scripts/basic/fixdep.c 2011-03-27 14:31:47.000000000 -0400
70235 +++ linux-2.6.32.42/scripts/basic/fixdep.c 2011-04-17 15:56:46.000000000 -0400
70236 @@ -222,9 +222,9 @@ static void use_config(char *m, int slen
70237
70238 static void parse_config_file(char *map, size_t len)
70239 {
70240 - int *end = (int *) (map + len);
70241 + unsigned int *end = (unsigned int *) (map + len);
70242 /* start at +1, so that p can never be < map */
70243 - int *m = (int *) map + 1;
70244 + unsigned int *m = (unsigned int *) map + 1;
70245 char *p, *q;
70246
70247 for (; m < end; m++) {
70248 @@ -371,7 +371,7 @@ static void print_deps(void)
70249 static void traps(void)
70250 {
70251 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
70252 - int *p = (int *)test;
70253 + unsigned int *p = (unsigned int *)test;
70254
70255 if (*p != INT_CONF) {
70256 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
70257 diff -urNp linux-2.6.32.42/scripts/Makefile.build linux-2.6.32.42/scripts/Makefile.build
70258 --- linux-2.6.32.42/scripts/Makefile.build 2011-03-27 14:31:47.000000000 -0400
70259 +++ linux-2.6.32.42/scripts/Makefile.build 2011-06-04 20:46:51.000000000 -0400
70260 @@ -59,7 +59,7 @@ endif
70261 endif
70262
70263 # Do not include host rules unless needed
70264 -ifneq ($(hostprogs-y)$(hostprogs-m),)
70265 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
70266 include scripts/Makefile.host
70267 endif
70268
70269 diff -urNp linux-2.6.32.42/scripts/Makefile.clean linux-2.6.32.42/scripts/Makefile.clean
70270 --- linux-2.6.32.42/scripts/Makefile.clean 2011-03-27 14:31:47.000000000 -0400
70271 +++ linux-2.6.32.42/scripts/Makefile.clean 2011-06-04 20:47:19.000000000 -0400
70272 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
70273 __clean-files := $(extra-y) $(always) \
70274 $(targets) $(clean-files) \
70275 $(host-progs) \
70276 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
70277 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
70278 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
70279
70280 # as clean-files is given relative to the current directory, this adds
70281 # a $(obj) prefix, except for absolute paths
70282 diff -urNp linux-2.6.32.42/scripts/Makefile.host linux-2.6.32.42/scripts/Makefile.host
70283 --- linux-2.6.32.42/scripts/Makefile.host 2011-03-27 14:31:47.000000000 -0400
70284 +++ linux-2.6.32.42/scripts/Makefile.host 2011-06-04 20:48:22.000000000 -0400
70285 @@ -31,6 +31,7 @@
70286 # Note: Shared libraries consisting of C++ files are not supported
70287
70288 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
70289 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
70290
70291 # C code
70292 # Executables compiled from a single .c file
70293 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
70294 # Shared libaries (only .c supported)
70295 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
70296 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
70297 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
70298 # Remove .so files from "xxx-objs"
70299 host-cobjs := $(filter-out %.so,$(host-cobjs))
70300
70301 diff -urNp linux-2.6.32.42/scripts/mod/file2alias.c linux-2.6.32.42/scripts/mod/file2alias.c
70302 --- linux-2.6.32.42/scripts/mod/file2alias.c 2011-03-27 14:31:47.000000000 -0400
70303 +++ linux-2.6.32.42/scripts/mod/file2alias.c 2011-04-17 15:56:46.000000000 -0400
70304 @@ -72,7 +72,7 @@ static void device_id_check(const char *
70305 unsigned long size, unsigned long id_size,
70306 void *symval)
70307 {
70308 - int i;
70309 + unsigned int i;
70310
70311 if (size % id_size || size < id_size) {
70312 if (cross_build != 0)
70313 @@ -102,7 +102,7 @@ static void device_id_check(const char *
70314 /* USB is special because the bcdDevice can be matched against a numeric range */
70315 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
70316 static void do_usb_entry(struct usb_device_id *id,
70317 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
70318 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
70319 unsigned char range_lo, unsigned char range_hi,
70320 struct module *mod)
70321 {
70322 @@ -368,7 +368,7 @@ static void do_pnp_device_entry(void *sy
70323 for (i = 0; i < count; i++) {
70324 const char *id = (char *)devs[i].id;
70325 char acpi_id[sizeof(devs[0].id)];
70326 - int j;
70327 + unsigned int j;
70328
70329 buf_printf(&mod->dev_table_buf,
70330 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
70331 @@ -398,7 +398,7 @@ static void do_pnp_card_entries(void *sy
70332
70333 for (j = 0; j < PNP_MAX_DEVICES; j++) {
70334 const char *id = (char *)card->devs[j].id;
70335 - int i2, j2;
70336 + unsigned int i2, j2;
70337 int dup = 0;
70338
70339 if (!id[0])
70340 @@ -424,7 +424,7 @@ static void do_pnp_card_entries(void *sy
70341 /* add an individual alias for every device entry */
70342 if (!dup) {
70343 char acpi_id[sizeof(card->devs[0].id)];
70344 - int k;
70345 + unsigned int k;
70346
70347 buf_printf(&mod->dev_table_buf,
70348 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
70349 @@ -699,7 +699,7 @@ static void dmi_ascii_filter(char *d, co
70350 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
70351 char *alias)
70352 {
70353 - int i, j;
70354 + unsigned int i, j;
70355
70356 sprintf(alias, "dmi*");
70357
70358 diff -urNp linux-2.6.32.42/scripts/mod/modpost.c linux-2.6.32.42/scripts/mod/modpost.c
70359 --- linux-2.6.32.42/scripts/mod/modpost.c 2011-03-27 14:31:47.000000000 -0400
70360 +++ linux-2.6.32.42/scripts/mod/modpost.c 2011-04-17 15:56:46.000000000 -0400
70361 @@ -835,6 +835,7 @@ enum mismatch {
70362 INIT_TO_EXIT,
70363 EXIT_TO_INIT,
70364 EXPORT_TO_INIT_EXIT,
70365 + DATA_TO_TEXT
70366 };
70367
70368 struct sectioncheck {
70369 @@ -920,6 +921,12 @@ const struct sectioncheck sectioncheck[]
70370 .fromsec = { "__ksymtab*", NULL },
70371 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
70372 .mismatch = EXPORT_TO_INIT_EXIT
70373 +},
70374 +/* Do not reference code from writable data */
70375 +{
70376 + .fromsec = { DATA_SECTIONS, NULL },
70377 + .tosec = { TEXT_SECTIONS, NULL },
70378 + .mismatch = DATA_TO_TEXT
70379 }
70380 };
70381
70382 @@ -1024,10 +1031,10 @@ static Elf_Sym *find_elf_symbol(struct e
70383 continue;
70384 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
70385 continue;
70386 - if (sym->st_value == addr)
70387 - return sym;
70388 /* Find a symbol nearby - addr are maybe negative */
70389 d = sym->st_value - addr;
70390 + if (d == 0)
70391 + return sym;
70392 if (d < 0)
70393 d = addr - sym->st_value;
70394 if (d < distance) {
70395 @@ -1268,6 +1275,14 @@ static void report_sec_mismatch(const ch
70396 "Fix this by removing the %sannotation of %s "
70397 "or drop the export.\n",
70398 tosym, sec2annotation(tosec), sec2annotation(tosec), tosym);
70399 + case DATA_TO_TEXT:
70400 +/*
70401 + fprintf(stderr,
70402 + "The variable %s references\n"
70403 + "the %s %s%s%s\n",
70404 + fromsym, to, sec2annotation(tosec), tosym, to_p);
70405 +*/
70406 + break;
70407 case NO_MISMATCH:
70408 /* To get warnings on missing members */
70409 break;
70410 @@ -1651,7 +1666,7 @@ void __attribute__((format(printf, 2, 3)
70411 va_end(ap);
70412 }
70413
70414 -void buf_write(struct buffer *buf, const char *s, int len)
70415 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
70416 {
70417 if (buf->size - buf->pos < len) {
70418 buf->size += len + SZ;
70419 @@ -1863,7 +1878,7 @@ static void write_if_changed(struct buff
70420 if (fstat(fileno(file), &st) < 0)
70421 goto close_write;
70422
70423 - if (st.st_size != b->pos)
70424 + if (st.st_size != (off_t)b->pos)
70425 goto close_write;
70426
70427 tmp = NOFAIL(malloc(b->pos));
70428 diff -urNp linux-2.6.32.42/scripts/mod/modpost.h linux-2.6.32.42/scripts/mod/modpost.h
70429 --- linux-2.6.32.42/scripts/mod/modpost.h 2011-03-27 14:31:47.000000000 -0400
70430 +++ linux-2.6.32.42/scripts/mod/modpost.h 2011-04-17 15:56:46.000000000 -0400
70431 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
70432
70433 struct buffer {
70434 char *p;
70435 - int pos;
70436 - int size;
70437 + unsigned int pos;
70438 + unsigned int size;
70439 };
70440
70441 void __attribute__((format(printf, 2, 3)))
70442 buf_printf(struct buffer *buf, const char *fmt, ...);
70443
70444 void
70445 -buf_write(struct buffer *buf, const char *s, int len);
70446 +buf_write(struct buffer *buf, const char *s, unsigned int len);
70447
70448 struct module {
70449 struct module *next;
70450 diff -urNp linux-2.6.32.42/scripts/mod/sumversion.c linux-2.6.32.42/scripts/mod/sumversion.c
70451 --- linux-2.6.32.42/scripts/mod/sumversion.c 2011-03-27 14:31:47.000000000 -0400
70452 +++ linux-2.6.32.42/scripts/mod/sumversion.c 2011-04-17 15:56:46.000000000 -0400
70453 @@ -455,7 +455,7 @@ static void write_version(const char *fi
70454 goto out;
70455 }
70456
70457 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
70458 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
70459 warn("writing sum in %s failed: %s\n",
70460 filename, strerror(errno));
70461 goto out;
70462 diff -urNp linux-2.6.32.42/scripts/pnmtologo.c linux-2.6.32.42/scripts/pnmtologo.c
70463 --- linux-2.6.32.42/scripts/pnmtologo.c 2011-03-27 14:31:47.000000000 -0400
70464 +++ linux-2.6.32.42/scripts/pnmtologo.c 2011-04-17 15:56:46.000000000 -0400
70465 @@ -237,14 +237,14 @@ static void write_header(void)
70466 fprintf(out, " * Linux logo %s\n", logoname);
70467 fputs(" */\n\n", out);
70468 fputs("#include <linux/linux_logo.h>\n\n", out);
70469 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
70470 + fprintf(out, "static unsigned char %s_data[] = {\n",
70471 logoname);
70472 }
70473
70474 static void write_footer(void)
70475 {
70476 fputs("\n};\n\n", out);
70477 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
70478 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
70479 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
70480 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
70481 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
70482 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
70483 fputs("\n};\n\n", out);
70484
70485 /* write logo clut */
70486 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
70487 + fprintf(out, "static unsigned char %s_clut[] = {\n",
70488 logoname);
70489 write_hex_cnt = 0;
70490 for (i = 0; i < logo_clutsize; i++) {
70491 diff -urNp linux-2.6.32.42/scripts/tags.sh linux-2.6.32.42/scripts/tags.sh
70492 --- linux-2.6.32.42/scripts/tags.sh 2011-03-27 14:31:47.000000000 -0400
70493 +++ linux-2.6.32.42/scripts/tags.sh 2011-06-07 18:06:04.000000000 -0400
70494 @@ -93,6 +93,11 @@ docscope()
70495 cscope -b -f cscope.out
70496 }
70497
70498 +dogtags()
70499 +{
70500 + all_sources | gtags -f -
70501 +}
70502 +
70503 exuberant()
70504 {
70505 all_sources | xargs $1 -a \
70506 @@ -164,6 +169,10 @@ case "$1" in
70507 docscope
70508 ;;
70509
70510 + "gtags")
70511 + dogtags
70512 + ;;
70513 +
70514 "tags")
70515 rm -f tags
70516 xtags ctags
70517 diff -urNp linux-2.6.32.42/security/capability.c linux-2.6.32.42/security/capability.c
70518 --- linux-2.6.32.42/security/capability.c 2011-03-27 14:31:47.000000000 -0400
70519 +++ linux-2.6.32.42/security/capability.c 2011-04-17 15:56:46.000000000 -0400
70520 @@ -890,7 +890,7 @@ static void cap_audit_rule_free(void *ls
70521 }
70522 #endif /* CONFIG_AUDIT */
70523
70524 -struct security_operations default_security_ops = {
70525 +struct security_operations default_security_ops __read_only = {
70526 .name = "default",
70527 };
70528
70529 diff -urNp linux-2.6.32.42/security/commoncap.c linux-2.6.32.42/security/commoncap.c
70530 --- linux-2.6.32.42/security/commoncap.c 2011-03-27 14:31:47.000000000 -0400
70531 +++ linux-2.6.32.42/security/commoncap.c 2011-04-17 15:56:46.000000000 -0400
70532 @@ -27,7 +27,7 @@
70533 #include <linux/sched.h>
70534 #include <linux/prctl.h>
70535 #include <linux/securebits.h>
70536 -
70537 +#include <net/sock.h>
70538 /*
70539 * If a non-root user executes a setuid-root binary in
70540 * !secure(SECURE_NOROOT) mode, then we raise capabilities.
70541 @@ -50,9 +50,11 @@ static void warn_setuid_and_fcaps_mixed(
70542 }
70543 }
70544
70545 +extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk);
70546 +
70547 int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
70548 {
70549 - NETLINK_CB(skb).eff_cap = current_cap();
70550 + NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk);
70551 return 0;
70552 }
70553
70554 @@ -582,6 +584,9 @@ int cap_bprm_secureexec(struct linux_bin
70555 {
70556 const struct cred *cred = current_cred();
70557
70558 + if (gr_acl_enable_at_secure())
70559 + return 1;
70560 +
70561 if (cred->uid != 0) {
70562 if (bprm->cap_effective)
70563 return 1;
70564 diff -urNp linux-2.6.32.42/security/integrity/ima/ima_api.c linux-2.6.32.42/security/integrity/ima/ima_api.c
70565 --- linux-2.6.32.42/security/integrity/ima/ima_api.c 2011-03-27 14:31:47.000000000 -0400
70566 +++ linux-2.6.32.42/security/integrity/ima/ima_api.c 2011-04-17 15:56:46.000000000 -0400
70567 @@ -74,7 +74,7 @@ void ima_add_violation(struct inode *ino
70568 int result;
70569
70570 /* can overflow, only indicator */
70571 - atomic_long_inc(&ima_htable.violations);
70572 + atomic_long_inc_unchecked(&ima_htable.violations);
70573
70574 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
70575 if (!entry) {
70576 diff -urNp linux-2.6.32.42/security/integrity/ima/ima_fs.c linux-2.6.32.42/security/integrity/ima/ima_fs.c
70577 --- linux-2.6.32.42/security/integrity/ima/ima_fs.c 2011-03-27 14:31:47.000000000 -0400
70578 +++ linux-2.6.32.42/security/integrity/ima/ima_fs.c 2011-04-17 15:56:46.000000000 -0400
70579 @@ -27,12 +27,12 @@
70580 static int valid_policy = 1;
70581 #define TMPBUFLEN 12
70582 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
70583 - loff_t *ppos, atomic_long_t *val)
70584 + loff_t *ppos, atomic_long_unchecked_t *val)
70585 {
70586 char tmpbuf[TMPBUFLEN];
70587 ssize_t len;
70588
70589 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
70590 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
70591 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
70592 }
70593
70594 diff -urNp linux-2.6.32.42/security/integrity/ima/ima.h linux-2.6.32.42/security/integrity/ima/ima.h
70595 --- linux-2.6.32.42/security/integrity/ima/ima.h 2011-03-27 14:31:47.000000000 -0400
70596 +++ linux-2.6.32.42/security/integrity/ima/ima.h 2011-04-17 15:56:46.000000000 -0400
70597 @@ -84,8 +84,8 @@ void ima_add_violation(struct inode *ino
70598 extern spinlock_t ima_queue_lock;
70599
70600 struct ima_h_table {
70601 - atomic_long_t len; /* number of stored measurements in the list */
70602 - atomic_long_t violations;
70603 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
70604 + atomic_long_unchecked_t violations;
70605 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
70606 };
70607 extern struct ima_h_table ima_htable;
70608 diff -urNp linux-2.6.32.42/security/integrity/ima/ima_queue.c linux-2.6.32.42/security/integrity/ima/ima_queue.c
70609 --- linux-2.6.32.42/security/integrity/ima/ima_queue.c 2011-03-27 14:31:47.000000000 -0400
70610 +++ linux-2.6.32.42/security/integrity/ima/ima_queue.c 2011-04-17 15:56:46.000000000 -0400
70611 @@ -78,7 +78,7 @@ static int ima_add_digest_entry(struct i
70612 INIT_LIST_HEAD(&qe->later);
70613 list_add_tail_rcu(&qe->later, &ima_measurements);
70614
70615 - atomic_long_inc(&ima_htable.len);
70616 + atomic_long_inc_unchecked(&ima_htable.len);
70617 key = ima_hash_key(entry->digest);
70618 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
70619 return 0;
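The atomic_long_unchecked_t conversions in these IMA hunks exempt purely statistical counters (which are allowed to wrap, as the "can overflow, only indicator" comment notes) from the reference counter overflow protection configured later in this patch as PAX_REFCOUNT. A minimal sketch of the idea, using a simplified type rather than the per-architecture definitions the patch actually provides:

	/* illustrative only: an "unchecked" counter is a plain counter with no
	 * overflow detection, for values where wrapping is harmless */
	typedef struct {
		long counter;
	} atomic_long_unchecked_t;

	static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *v)
	{
		v->counter++;		/* no saturation, no overflow report */
	}

	static inline long atomic_long_read_unchecked(const atomic_long_unchecked_t *v)
	{
		return v->counter;
	}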
70620 diff -urNp linux-2.6.32.42/security/Kconfig linux-2.6.32.42/security/Kconfig
70621 --- linux-2.6.32.42/security/Kconfig 2011-03-27 14:31:47.000000000 -0400
70622 +++ linux-2.6.32.42/security/Kconfig 2011-06-04 20:45:36.000000000 -0400
70623 @@ -4,6 +4,555 @@
70624
70625 menu "Security options"
70626
70627 +source grsecurity/Kconfig
70628 +
70629 +menu "PaX"
70630 +
70631 + config ARCH_TRACK_EXEC_LIMIT
70632 + bool
70633 +
70634 + config PAX_PER_CPU_PGD
70635 + bool
70636 +
70637 + config TASK_SIZE_MAX_SHIFT
70638 + int
70639 + depends on X86_64
70640 + default 47 if !PAX_PER_CPU_PGD
70641 + default 42 if PAX_PER_CPU_PGD
70642 +
70643 + config PAX_ENABLE_PAE
70644 + bool
70645 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
70646 +
70647 +config PAX
70648 + bool "Enable various PaX features"
70649 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
70650 + help
70651 + This allows you to enable various PaX features. PaX adds
70652 + intrusion prevention mechanisms to the kernel that reduce
70653 + the risks posed by exploitable memory corruption bugs.
70654 +
70655 +menu "PaX Control"
70656 + depends on PAX
70657 +
70658 +config PAX_SOFTMODE
70659 + bool 'Support soft mode'
70660 + select PAX_PT_PAX_FLAGS
70661 + help
70662 + Enabling this option will allow you to run PaX in soft mode, that
70663 + is, PaX features will not be enforced by default, only on executables
70664 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
70665 + is the only way to mark executables for soft mode use.
70666 +
70667 + Soft mode can be activated by using the "pax_softmode=1" kernel command
70668 + line option on boot. Furthermore you can control various PaX features
70669 + at runtime via the entries in /proc/sys/kernel/pax.
70670 +
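A boot parameter such as "pax_softmode=1" is normally wired up through the kernel's __setup() mechanism. The sketch below only illustrates that pattern; the flag name and its consumers are assumptions for the example, not lifted from this patch:

	#include <linux/init.h>
	#include <linux/kernel.h>

	int pax_softmode;	/* hypothetical global consulted when per-task PaX flags are computed */

	static int __init setup_pax_softmode(char *str)
	{
		get_option(&str, &pax_softmode);	/* parse the value after "pax_softmode=" */
		return 1;
	}
	__setup("pax_softmode=", setup_pax_softmode);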
70671 +config PAX_EI_PAX
70672 + bool 'Use legacy ELF header marking'
70673 + help
70674 + Enabling this option will allow you to control PaX features on
70675 + a per executable basis via the 'chpax' utility available at
70676 + http://pax.grsecurity.net/. The control flags will be read from
70677 + an otherwise reserved part of the ELF header. This marking has
70678 + numerous drawbacks (no support for soft-mode, toolchain does not
70679 + know about the non-standard use of the ELF header) therefore it
70680 + has been deprecated in favour of PT_PAX_FLAGS support.
70681 +
70682 + Note that if you enable PT_PAX_FLAGS marking support as well,
70683 + the PT_PAX_FLAGS marks will override the legacy EI_PAX marks.

70684 +
70685 +config PAX_PT_PAX_FLAGS
70686 + bool 'Use ELF program header marking'
70687 + help
70688 + Enabling this option will allow you to control PaX features on
70689 + a per executable basis via the 'paxctl' utility available at
70690 + http://pax.grsecurity.net/. The control flags will be read from
70691 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
70692 + has the benefits of supporting both soft mode and being fully
70693 + integrated into the toolchain (the binutils patch is available
70694 + from http://pax.grsecurity.net).
70695 +
70696 + If your toolchain does not support PT_PAX_FLAGS markings,
70697 + you can create one in most cases with 'paxctl -C'.
70698 +
70699 + Note that if you enable the legacy EI_PAX marking support as well,
70700 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
70701 +
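For illustration, reading such a marking in an ELF loader amounts to scanning the program headers for the PaX-specific entry and taking its p_flags as the control bits. The header type constant below is an assumption made for this sketch, not quoted from the patch:

	#include <linux/elf.h>

	#define PT_PAX_FLAGS_EXAMPLE 0x65041580UL	/* assumed value of the PaX program header type */

	static unsigned long pax_flags_from_phdrs_example(const struct elf_phdr *phdr,
							  unsigned int phnum)
	{
		unsigned int i;

		for (i = 0; i < phnum; i++)
			if (phdr[i].p_type == PT_PAX_FLAGS_EXAMPLE)
				return phdr[i].p_flags;	/* per-executable PaX control bits */

		return 0;	/* unmarked executable: the configured defaults apply */
	}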
70702 +choice
70703 + prompt 'MAC system integration'
70704 + default PAX_HAVE_ACL_FLAGS
70705 + help
70706 + Mandatory Access Control systems have the option of controlling
70707 + PaX flags on a per executable basis, choose the method supported
70708 + by your particular system.
70709 +
70710 + - "none": if your MAC system does not interact with PaX,
70711 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
70712 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
70713 +
70714 + NOTE: this option is for developers/integrators only.
70715 +
70716 + config PAX_NO_ACL_FLAGS
70717 + bool 'none'
70718 +
70719 + config PAX_HAVE_ACL_FLAGS
70720 + bool 'direct'
70721 +
70722 + config PAX_HOOK_ACL_FLAGS
70723 + bool 'hook'
70724 +endchoice
70725 +
70726 +endmenu
70727 +
70728 +menu "Non-executable pages"
70729 + depends on PAX
70730 +
70731 +config PAX_NOEXEC
70732 + bool "Enforce non-executable pages"
70733 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
70734 + help
70735 + By design some architectures do not allow for protecting memory
70736 + pages against execution, or even if they do, Linux does not make
70737 + use of this feature. In practice this means that if a page is
70738 + readable (such as the stack or heap) it is also executable.
70739 +
70740 + There is a well known exploit technique that makes use of this
70741 + fact and a common programming mistake where an attacker can
70742 + introduce code of his choice somewhere in the attacked program's
70743 + memory (typically the stack or the heap) and then execute it.
70744 +
70745 + If the attacked program was running with different (typically
70746 + higher) privileges than that of the attacker, then he can elevate
70747 + his own privilege level (e.g. get a root shell, write to files to
70748 + which he does not have write access, etc).
70749 +
70750 + Enabling this option will let you choose from various features
70751 + that prevent the injection and execution of 'foreign' code in
70752 + a program.
70753 +
70754 + This will also break programs that rely on the old behaviour and
70755 + expect that dynamically allocated memory via the malloc() family
70756 + of functions is executable (which it is not). Notable examples
70757 + are the XFree86 4.x server, the java runtime and wine.
70758 +
70759 +config PAX_PAGEEXEC
70760 + bool "Paging based non-executable pages"
70761 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
70762 + select S390_SWITCH_AMODE if S390
70763 + select S390_EXEC_PROTECT if S390
70764 + select ARCH_TRACK_EXEC_LIMIT if X86_32
70765 + help
70766 + This implementation is based on the paging feature of the CPU.
70767 + On i386 without hardware non-executable bit support there is a
70768 + variable but usually low performance impact; however, on Intel's
70769 + P4 core based CPUs it is very high, so you should not enable this
70770 + for kernels meant to be used on such CPUs.
70771 +
70772 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
70773 + with hardware non-executable bit support there is no performance
70774 + impact, on ppc the impact is negligible.
70775 +
70776 + Note that several architectures require various emulations due to
70777 + badly designed userland ABIs; this causes a performance impact that
70778 + will disappear as soon as userland is fixed. For example, ppc
70779 + userland MUST have been built with secure-plt by a recent toolchain.
70780 +
70781 +config PAX_SEGMEXEC
70782 + bool "Segmentation based non-executable pages"
70783 + depends on PAX_NOEXEC && X86_32
70784 + help
70785 + This implementation is based on the segmentation feature of the
70786 + CPU and has a very small performance impact, however applications
70787 + will be limited to a 1.5 GB address space instead of the normal
70788 + 3 GB.
70789 +
70790 +config PAX_EMUTRAMP
70791 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
70792 + default y if PARISC
70793 + help
70794 + There are some programs and libraries that for one reason or
70795 + another attempt to execute special small code snippets from
70796 + non-executable memory pages. Most notable examples are the
70797 + signal handler return code generated by the kernel itself and
70798 + the GCC trampolines.
70799 +
70800 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
70801 + such programs will no longer work under your kernel.
70802 +
70803 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
70804 + utilities to enable trampoline emulation for the affected programs
70805 + yet still have the protection provided by the non-executable pages.
70806 +
70807 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
70808 + your system will not even boot.
70809 +
70810 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
70811 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
70812 + for the affected files.
70813 +
70814 + NOTE: enabling this feature *may* open up a loophole in the
70815 + protection provided by non-executable pages that an attacker
70816 + could abuse. Therefore the best solution is to not have any
70817 + files on your system that would require this option. This can
70818 + be achieved by not using libc5 (which relies on the kernel
70819 + signal handler return code) and not using or rewriting programs
70820 + that make use of the nested function implementation of GCC.
70821 + Skilled users can just fix GCC itself so that it implements
70822 + nested function calls in a way that does not interfere with PaX.
70823 +
70824 +config PAX_EMUSIGRT
70825 + bool "Automatically emulate sigreturn trampolines"
70826 + depends on PAX_EMUTRAMP && PARISC
70827 + default y
70828 + help
70829 + Enabling this option will have the kernel automatically detect
70830 + and emulate signal return trampolines executing on the stack
70831 + that would otherwise lead to task termination.
70832 +
70833 + This solution is intended as a temporary one for users with
70834 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
70835 + Modula-3 runtime, etc) or executables linked to such, basically
70836 + everything that does not specify its own SA_RESTORER function in
70837 + normal executable memory like glibc 2.1+ does.
70838 +
70839 + On parisc you MUST enable this option, otherwise your system will
70840 + not even boot.
70841 +
70842 + NOTE: this feature cannot be disabled on a per executable basis
70843 + and since it *does* open up a loophole in the protection provided
70844 + by non-executable pages, the best solution is to not have any
70845 + files on your system that would require this option.
70846 +
70847 +config PAX_MPROTECT
70848 + bool "Restrict mprotect()"
70849 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
70850 + help
70851 + Enabling this option will prevent programs from
70852 + - changing the executable status of memory pages that were
70853 + not originally created as executable,
70854 + - making read-only executable pages writable again,
70855 + - creating executable pages from anonymous memory,
70856 + - making read-only-after-relocations (RELRO) data pages writable again.
70857 +
70858 + You should say Y here to complete the protection provided by
70859 + the enforcement of non-executable pages.
70860 +
70861 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
70862 + this feature on a per file basis.
70863 +
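The policy can be pictured as a check of the following kind, run whenever mprotect() would grant PROT_EXEC. This is only a simplified illustration of the rule, not the patch's actual implementation, which tracks the originally requested rights in the VMA flags:

	#include <linux/errno.h>
	#include <linux/mm.h>
	#include <linux/mman.h>

	/* deny making a mapping executable unless it was created executable
	 * and is not writable */
	static int example_mprotect_check(unsigned long vm_flags, unsigned long new_prot)
	{
		if ((new_prot & PROT_EXEC) &&
		    (!(vm_flags & VM_EXEC) || (vm_flags & VM_WRITE)))
			return -EACCES;
		return 0;
	}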
70864 +config PAX_MPROTECT_COMPAT
70865 + bool "Use legacy/compat protection demoting (read help)"
70866 + depends on PAX_MPROTECT
70867 + default n
70868 + help
70869 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
70870 + by sending the proper error code to the application. For some broken
70871 + userland, this can cause problems with Python or other applications. The
70872 + current implementation however allows for applications like clamav to
70873 + detect if JIT compilation/execution is allowed and to fall back gracefully
70874 + to an interpreter-based mode if it is not. While we encourage everyone
70875 + to use the current implementation as-is and push upstream to fix broken
70876 + userland (note that the RWX logging option can assist with this), in some
70877 + environments this may not be possible. Having to disable MPROTECT
70878 + completely on certain binaries reduces the security benefit of PaX,
70879 + so this option is provided for those environments to revert to the old
70880 + behavior.
70881 +
70882 +config PAX_ELFRELOCS
70883 + bool "Allow ELF text relocations (read help)"
70884 + depends on PAX_MPROTECT
70885 + default n
70886 + help
70887 + Non-executable pages and mprotect() restrictions are effective
70888 + in preventing the introduction of new executable code into an
70889 + attacked task's address space. There remain only two avenues
70890 + for this kind of attack: if the attacker can execute already
70891 + existing code in the attacked task then he can either have it
70892 + create and mmap() a file containing his code or have it mmap()
70893 + an already existing ELF library that does not have position
70894 + independent code in it and use mprotect() on it to make it
70895 + writable and copy his code there. While protecting against
70896 + the former approach is beyond PaX, the latter can be prevented
70897 + by having only PIC ELF libraries on one's system (which do not
70898 + need to relocate their code). If you are sure this is your case,
70899 + as is the case with all modern Linux distributions, then leave
70900 + this option disabled. You should say 'n' here.
70901 +
70902 +config PAX_ETEXECRELOCS
70903 + bool "Allow ELF ET_EXEC text relocations"
70904 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
70905 + select PAX_ELFRELOCS
70906 + default y
70907 + help
70908 + On some architectures there are incorrectly created applications
70909 + that require text relocations and would not work without enabling
70910 + this option. If you are an alpha, ia64 or parisc user, you should
70911 + enable this option and disable it once you have made sure that
70912 + none of your applications need it.
70913 +
70914 +config PAX_EMUPLT
70915 + bool "Automatically emulate ELF PLT"
70916 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
70917 + default y
70918 + help
70919 + Enabling this option will have the kernel automatically detect
70920 + and emulate the Procedure Linkage Table entries in ELF files.
70921 + On some architectures such entries are in writable memory, and
70922 + become non-executable, leading to task termination. Therefore
70923 + it is mandatory that you enable this option on alpha, parisc,
70924 + sparc and sparc64, otherwise your system would not even boot.
70925 +
70926 + NOTE: this feature *does* open up a loophole in the protection
70927 + provided by the non-executable pages, therefore the proper
70928 + solution is to modify the toolchain to produce a PLT that does
70929 + not need to be writable.
70930 +
70931 +config PAX_DLRESOLVE
70932 + bool 'Emulate old glibc resolver stub'
70933 + depends on PAX_EMUPLT && SPARC
70934 + default n
70935 + help
70936 + This option is needed if userland has an old glibc (before 2.4)
70937 + that puts a 'save' instruction into the runtime generated resolver
70938 + stub that needs special emulation.
70939 +
70940 +config PAX_KERNEXEC
70941 + bool "Enforce non-executable kernel pages"
70942 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
70943 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
70944 + help
70945 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
70946 + that is, enabling this option will make it harder to inject
70947 + and execute 'foreign' code in kernel memory itself.
70948 +
70949 + Note that on x86_64 kernels there is a known regression when
70950 + this feature and KVM/VMX are both enabled in the host kernel.
70951 +
70952 +config PAX_KERNEXEC_MODULE_TEXT
70953 + int "Minimum amount of memory reserved for module code"
70954 + default "4"
70955 + depends on PAX_KERNEXEC && X86_32 && MODULES
70956 + help
70957 + Due to implementation details the kernel must reserve a fixed
70958 + amount of memory for module code at compile time that cannot be
70959 + changed at runtime. Here you can specify the minimum amount
70960 + in MB that will be reserved. Due to the same implementation
70961 + details this size will always be rounded up to the next 2/4 MB
70962 + boundary (depends on PAE) so the actually available memory for
70963 + boundary (depending on PAE), so the memory actually available for
70964 +
70965 + The default 4 MB should be enough for most users but if you have
70966 + an excessive number of modules (e.g., most distribution configs
70967 + compile many drivers as modules) or use huge modules such as
70968 + nvidia's kernel driver, you will need to adjust this amount.
70969 + A good rule of thumb is to look at your currently loaded kernel
70970 + modules and add up their sizes.
70971 +
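The "2/4 MB boundary" rounding mentioned above is plain power-of-two alignment of the requested size. Assuming the 4 MB step of the non-PAE case, the reservation would be computed roughly as:

	#define MODULE_TEXT_STEP_EXAMPLE (4UL << 20)	/* 4 MB without PAE, 2 MB with PAE */

	static unsigned long example_module_text_reserve(unsigned long requested_mb)
	{
		unsigned long bytes = requested_mb << 20;

		/* round up to the next step boundary; e.g. a 5 MB request becomes 8 MB */
		return (bytes + MODULE_TEXT_STEP_EXAMPLE - 1) & ~(MODULE_TEXT_STEP_EXAMPLE - 1);
	}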
70972 +endmenu
70973 +
70974 +menu "Address Space Layout Randomization"
70975 + depends on PAX
70976 +
70977 +config PAX_ASLR
70978 + bool "Address Space Layout Randomization"
70979 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
70980 + help
70981 + Many if not most exploit techniques rely on the knowledge of
70982 + certain addresses in the attacked program. The following options
70983 + will allow the kernel to apply a certain amount of randomization
70984 + to specific parts of the program thereby forcing an attacker to
70985 + guess them in most cases. Any failed guess will most likely crash
70986 + the attacked program, which allows the kernel to detect such attempts
70987 + and react to them. PaX itself provides no reaction mechanisms;
70988 + instead it is strongly encouraged that you make use of Nergal's
70989 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
70990 + (http://www.grsecurity.net/) built-in crash detection features or
70991 + develop one yourself.
70992 +
70993 + By saying Y here you can choose to randomize the following areas:
70994 + - top of the task's kernel stack
70995 + - top of the task's userland stack
70996 + - base address for mmap() requests that do not specify one
70997 + (this includes all libraries)
70998 + - base address of the main executable
70999 +
71000 + It is strongly recommended to say Y here as address space layout
71001 + randomization has a negligible impact on performance yet provides
71002 + very effective protection.
71003 +
71004 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
71005 + this feature on a per file basis.
71006 +
71007 +config PAX_RANDKSTACK
71008 + bool "Randomize kernel stack base"
71009 + depends on PAX_ASLR && X86_TSC && X86
71010 + help
71011 + By saying Y here the kernel will randomize every task's kernel
71012 + stack on every system call. This will not only force an attacker
71013 + to guess it but also prevent him from making use of possible
71014 + leaked information about it.
71015 +
71016 + Since the kernel stack is a rather scarce resource, randomization
71017 + may cause unexpected stack overflows, therefore you should very
71018 + carefully test your system. Note that once enabled in the kernel
71019 + configuration, this feature cannot be disabled on a per file basis.
71020 +
71021 +config PAX_RANDUSTACK
71022 + bool "Randomize user stack base"
71023 + depends on PAX_ASLR
71024 + help
71025 + By saying Y here the kernel will randomize every task's userland
71026 + stack. The randomization is done in two steps where the second
71027 + one may apply a large shift to the top of the stack and
71028 + cause problems for programs that want to use lots of memory (more
71029 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
71030 + For this reason the second step can be controlled by 'chpax' or
71031 + 'paxctl' on a per file basis.
71032 +
71033 +config PAX_RANDMMAP
71034 + bool "Randomize mmap() base"
71035 + depends on PAX_ASLR
71036 + help
71037 + By saying Y here the kernel will use a randomized base address for
71038 + mmap() requests that do not specify one themselves. As a result
71039 + all dynamically loaded libraries will appear at random addresses
71040 + and therefore be harder to exploit by a technique where an attacker
71041 + attempts to execute library code for his purposes (e.g. spawn a
71042 + shell from an exploited program that is running at an elevated
71043 + privilege level).
71044 +
71045 + Furthermore, if a program is relinked as a dynamic ELF file, its
71046 + base address will be randomized as well, completing the full
71047 + randomization of the address space layout. Attacking such programs
71048 + becomes a guess game. You can find an example of doing this at
71049 + becomes a guessing game. You can find an example of doing this at
71050 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
71051 +
71052 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
71053 + feature on a per file basis.
71054 +
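The randomization itself boils down to adding a page-aligned random delta to the architecture's default mmap base. A minimal sketch, where delta_bits stands in for the per-architecture PAX_DELTA_MMAP_LEN value and the helper name is illustrative only:

	#include <linux/mm.h>
	#include <linux/random.h>

	static unsigned long example_randomize_mmap_base(unsigned long base,
							 unsigned int delta_bits)
	{
		/* keep delta_bits random bits and scale them to page granularity */
		unsigned long delta = ((unsigned long)get_random_int() &
				       ((1UL << delta_bits) - 1)) << PAGE_SHIFT;

		return base + delta;
	}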
71055 +endmenu
71056 +
71057 +menu "Miscellaneous hardening features"
71058 +
71059 +config PAX_MEMORY_SANITIZE
71060 + bool "Sanitize all freed memory"
71061 + help
71062 + By saying Y here the kernel will erase memory pages as soon as they
71063 + are freed. This in turn reduces the lifetime of data stored in the
71064 + pages, making it less likely that sensitive information such as
71065 + passwords, cryptographic secrets, etc stay in memory for too long.
71066 +
71067 + This is especially useful for programs whose runtime is short; long
71068 + lived processes and the kernel itself benefit from this as long as
71069 + they operate on whole memory pages and ensure timely freeing of pages
71070 + that may hold sensitive information.
71071 +
71072 + The tradeoff is a performance impact: on a single CPU system, kernel
71073 + compilation sees a 3% slowdown; other systems and workloads may vary,
71074 + and you are advised to test this feature on your expected workload
71075 + before deploying it.
71076 +
71077 + Note that this feature does not protect data stored in live pages,
71078 + e.g., process memory swapped to disk may stay there for a long time.
71079 +
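Conceptually the sanitization is a clear of every page handed back to the allocator; the real hooks sit in the page and slab free paths, but the effect is roughly the following sketch:

	#include <linux/highmem.h>
	#include <linux/mm.h>

	/* illustrative sketch: wipe an order-'order' block as it is freed so
	 * stale data (passwords, keys, ...) cannot linger in free memory */
	static void example_sanitize_pages(struct page *page, unsigned int order)
	{
		unsigned int i;

		for (i = 0; i < (1U << order); i++)
			clear_highpage(page + i);
	}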
71080 +config PAX_MEMORY_STACKLEAK
71081 + bool "Sanitize kernel stack"
71082 + depends on X86
71083 + help
71084 + By saying Y here the kernel will erase the kernel stack before it
71085 + returns from a system call. This in turn reduces the information
71086 + that a kernel stack leak bug can reveal.
71087 +
71088 + Note that such a bug can still leak information that was put on
71089 + the stack by the current system call (the one eventually triggering
71090 + the bug) but traces of earlier system calls on the kernel stack
71091 + cannot leak anymore.
71092 +
71093 + The tradeoff is a performance impact: on a single CPU system, kernel
71094 + compilation sees a 1% slowdown; other systems and workloads may vary,
71095 + and you are advised to test this feature on your expected workload
71096 + before deploying it.
71097 +
71098 + Note: full support for this feature requires gcc with plugin support,
71099 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
71100 + is not supported). Using older gcc versions means that functions
71101 + with large enough stack frames may leave uninitialized memory behind
71102 + that may be exposed to a later syscall leaking the stack.
71103 +
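This feature works together with the pax_track_stack() instrumentation emitted by the gcc plugin added at the end of this patch: instrumented functions record how deep the kernel stack has grown, and the syscall exit path erases the stack up to that mark. A sketch of the tracking helper only, with a hypothetical per-task field standing in for the real bookkeeping:

	#include <linux/sched.h>

	void example_pax_track_stack(void)
	{
		unsigned long sp = (unsigned long)&sp;	/* address of a local approximates the stack pointer */

		/* remember the deepest stack address touched so far;
		 * 'lowest_stack' is a hypothetical field for this sketch */
		if (sp < current->lowest_stack)
			current->lowest_stack = sp;
	}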
71104 +config PAX_MEMORY_UDEREF
71105 + bool "Prevent invalid userland pointer dereference"
71106 + depends on X86 && !UML_X86 && !XEN
71107 + select PAX_PER_CPU_PGD if X86_64
71108 + help
71109 + By saying Y here the kernel will be prevented from dereferencing
71110 + userland pointers in contexts where the kernel expects only kernel
71111 + pointers. This is both a useful runtime debugging feature and a
71112 + security measure that prevents exploiting a class of kernel bugs.
71113 +
71114 + The tradeoff is that some virtualization solutions may experience
71115 + a huge slowdown and therefore you should not enable this feature
71116 + for kernels meant to run in such environments. Whether a given VM
71117 + solution is affected or not is best determined by simply trying it
71118 + out; the performance impact will be obvious right on boot, as this
71119 + mechanism engages from very early on. A good rule of thumb is that
71120 + VMs running on CPUs without hardware virtualization support (i.e.,
71121 + the majority of IA-32 CPUs) will likely experience the slowdown.
71122 +
71123 +config PAX_REFCOUNT
71124 + bool "Prevent various kernel object reference counter overflows"
71125 + depends on GRKERNSEC && (X86 || SPARC64)
71126 + help
71127 + By saying Y here the kernel will detect and prevent overflowing
71128 + various (but not all) kinds of object reference counters. Such
71129 + overflows can normally occur due to bugs only and are often, if
71130 + not always, exploitable.
71131 +
71132 + The tradeoff is that data structures protected by an overflowed
71133 + refcount will never be freed and therefore will leak memory. Note
71134 + that this leak also happens even without this protection but in
71135 + that case the overflow can eventually trigger the freeing of the
71136 + data structure while it is still being used elsewhere, resulting
71137 + in the exploitable situation that this feature prevents.
71138 +
71139 + Since this has a negligible performance impact, you should enable
71140 + this feature.
71141 +
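On x86 the detection is done in the atomic_inc() family's assembly by checking the CPU's overflow flag; a plainer way to picture the same policy is an increment that refuses to wrap, sketched below (simplified, not the patch's code):

	#include <asm/atomic.h>
	#include <linux/errno.h>
	#include <linux/kernel.h>

	static int example_refcount_inc(atomic_t *v)
	{
		int old, new;

		do {
			old = atomic_read(v);
			if (old == INT_MAX)
				return -EOVERFLOW;	/* saturate instead of wrapping */
			new = old + 1;
		} while (atomic_cmpxchg(v, old, new) != old);

		return 0;
	}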
71142 +config PAX_USERCOPY
71143 + bool "Harden heap object copies between kernel and userland"
71144 + depends on X86 || PPC || SPARC
71145 + depends on GRKERNSEC && (SLAB || SLUB)
71146 + help
71147 + By saying Y here the kernel will enforce the size of heap objects
71148 + when they are copied in either direction between the kernel and
71149 + userland, even if only a part of the heap object is copied.
71150 +
71151 + Specifically, this checking prevents information leaking from the
71152 + kernel heap during kernel to userland copies (if the kernel heap
71153 + object is otherwise fully initialized) and prevents kernel heap
71154 + overflows during userland to kernel copies.
71155 +
71156 + Note that the current implementation provides the strictest bounds
71157 + checks for the SLUB allocator.
71158 +
71159 + Enabling this option also enables per-slab cache protection against
71160 + data in a given cache being copied into/out of via userland
71161 + accessors. Though the whitelist of regions will be reduced over
71162 + time, it notably protects important data structures like task structs.
71163 +
71164 +
71165 + If frame pointers are enabled on x86, this option will also
71166 + restrict copies into and out of the kernel stack to local variables
71167 + within a single frame.
71168 +
71169 + Since this has a negligible performance impact, you should enable
71170 + this feature.
71171 +
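The enforcement amounts to validating, before the actual copy, that the kernel-side buffer and the requested length stay within a single heap object (or, with frame pointers, a single stack frame). A highly simplified sketch of the heap-object case, with the object bounds assumed to come from the slab allocator's metadata:

	#include <linux/errno.h>

	static int example_check_heap_copy(const void *ptr, unsigned long n,
					   unsigned long obj_start, unsigned long obj_len)
	{
		unsigned long addr = (unsigned long)ptr;

		/* the copy must start inside the object and must not run past its end */
		if (addr < obj_start || addr + n < addr /* length overflow */ ||
		    addr + n > obj_start + obj_len)
			return -EFAULT;
		return 0;
	}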
71172 +endmenu
71173 +
71174 +endmenu
71175 +
71176 config KEYS
71177 bool "Enable access key retention support"
71178 help
71179 @@ -146,7 +695,7 @@ config INTEL_TXT
71180 config LSM_MMAP_MIN_ADDR
71181 int "Low address space for LSM to protect from user allocation"
71182 depends on SECURITY && SECURITY_SELINUX
71183 - default 65536
71184 + default 32768
71185 help
71186 This is the portion of low virtual memory which should be protected
71187 from userspace allocation. Keeping a user from writing to low pages
71188 diff -urNp linux-2.6.32.42/security/keys/keyring.c linux-2.6.32.42/security/keys/keyring.c
71189 --- linux-2.6.32.42/security/keys/keyring.c 2011-03-27 14:31:47.000000000 -0400
71190 +++ linux-2.6.32.42/security/keys/keyring.c 2011-04-18 22:03:00.000000000 -0400
71191 @@ -214,15 +214,15 @@ static long keyring_read(const struct ke
71192 ret = -EFAULT;
71193
71194 for (loop = 0; loop < klist->nkeys; loop++) {
71195 + key_serial_t serial;
71196 key = klist->keys[loop];
71197 + serial = key->serial;
71198
71199 tmp = sizeof(key_serial_t);
71200 if (tmp > buflen)
71201 tmp = buflen;
71202
71203 - if (copy_to_user(buffer,
71204 - &key->serial,
71205 - tmp) != 0)
71206 + if (copy_to_user(buffer, &serial, tmp))
71207 goto error;
71208
71209 buflen -= tmp;
71210 diff -urNp linux-2.6.32.42/security/min_addr.c linux-2.6.32.42/security/min_addr.c
71211 --- linux-2.6.32.42/security/min_addr.c 2011-03-27 14:31:47.000000000 -0400
71212 +++ linux-2.6.32.42/security/min_addr.c 2011-04-17 15:56:46.000000000 -0400
71213 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
71214 */
71215 static void update_mmap_min_addr(void)
71216 {
71217 +#ifndef SPARC
71218 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
71219 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
71220 mmap_min_addr = dac_mmap_min_addr;
71221 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
71222 #else
71223 mmap_min_addr = dac_mmap_min_addr;
71224 #endif
71225 +#endif
71226 }
71227
71228 /*
71229 diff -urNp linux-2.6.32.42/security/root_plug.c linux-2.6.32.42/security/root_plug.c
71230 --- linux-2.6.32.42/security/root_plug.c 2011-03-27 14:31:47.000000000 -0400
71231 +++ linux-2.6.32.42/security/root_plug.c 2011-04-17 15:56:46.000000000 -0400
71232 @@ -70,7 +70,7 @@ static int rootplug_bprm_check_security
71233 return 0;
71234 }
71235
71236 -static struct security_operations rootplug_security_ops = {
71237 +static struct security_operations rootplug_security_ops __read_only = {
71238 .bprm_check_security = rootplug_bprm_check_security,
71239 };
71240
71241 diff -urNp linux-2.6.32.42/security/security.c linux-2.6.32.42/security/security.c
71242 --- linux-2.6.32.42/security/security.c 2011-03-27 14:31:47.000000000 -0400
71243 +++ linux-2.6.32.42/security/security.c 2011-04-17 15:56:46.000000000 -0400
71244 @@ -24,7 +24,7 @@ static __initdata char chosen_lsm[SECURI
71245 extern struct security_operations default_security_ops;
71246 extern void security_fixup_ops(struct security_operations *ops);
71247
71248 -struct security_operations *security_ops; /* Initialized to NULL */
71249 +struct security_operations *security_ops __read_only; /* Initialized to NULL */
71250
71251 static inline int verify(struct security_operations *ops)
71252 {
71253 @@ -106,7 +106,7 @@ int __init security_module_enable(struct
71254 * If there is already a security module registered with the kernel,
71255 * an error will be returned. Otherwise %0 is returned on success.
71256 */
71257 -int register_security(struct security_operations *ops)
71258 +int __init register_security(struct security_operations *ops)
71259 {
71260 if (verify(ops)) {
71261 printk(KERN_DEBUG "%s could not verify "
71262 diff -urNp linux-2.6.32.42/security/selinux/hooks.c linux-2.6.32.42/security/selinux/hooks.c
71263 --- linux-2.6.32.42/security/selinux/hooks.c 2011-03-27 14:31:47.000000000 -0400
71264 +++ linux-2.6.32.42/security/selinux/hooks.c 2011-04-17 15:56:46.000000000 -0400
71265 @@ -131,7 +131,7 @@ int selinux_enabled = 1;
71266 * Minimal support for a secondary security module,
71267 * just to allow the use of the capability module.
71268 */
71269 -static struct security_operations *secondary_ops;
71270 +static struct security_operations *secondary_ops __read_only;
71271
71272 /* Lists of inode and superblock security structures initialized
71273 before the policy was loaded. */
71274 @@ -5457,7 +5457,7 @@ static int selinux_key_getsecurity(struc
71275
71276 #endif
71277
71278 -static struct security_operations selinux_ops = {
71279 +static struct security_operations selinux_ops __read_only = {
71280 .name = "selinux",
71281
71282 .ptrace_access_check = selinux_ptrace_access_check,
71283 @@ -5841,7 +5841,9 @@ int selinux_disable(void)
71284 avc_disable();
71285
71286 /* Reset security_ops to the secondary module, dummy or capability. */
71287 + pax_open_kernel();
71288 security_ops = secondary_ops;
71289 + pax_close_kernel();
71290
71291 /* Unregister netfilter hooks. */
71292 selinux_nf_ip_exit();
71293 diff -urNp linux-2.6.32.42/security/selinux/include/xfrm.h linux-2.6.32.42/security/selinux/include/xfrm.h
71294 --- linux-2.6.32.42/security/selinux/include/xfrm.h 2011-03-27 14:31:47.000000000 -0400
71295 +++ linux-2.6.32.42/security/selinux/include/xfrm.h 2011-05-18 20:09:37.000000000 -0400
71296 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
71297
71298 static inline void selinux_xfrm_notify_policyload(void)
71299 {
71300 - atomic_inc(&flow_cache_genid);
71301 + atomic_inc_unchecked(&flow_cache_genid);
71302 }
71303 #else
71304 static inline int selinux_xfrm_enabled(void)
71305 diff -urNp linux-2.6.32.42/security/selinux/ss/services.c linux-2.6.32.42/security/selinux/ss/services.c
71306 --- linux-2.6.32.42/security/selinux/ss/services.c 2011-03-27 14:31:47.000000000 -0400
71307 +++ linux-2.6.32.42/security/selinux/ss/services.c 2011-05-16 21:46:57.000000000 -0400
71308 @@ -1715,6 +1715,8 @@ int security_load_policy(void *data, siz
71309 int rc = 0;
71310 struct policy_file file = { data, len }, *fp = &file;
71311
71312 + pax_track_stack();
71313 +
71314 if (!ss_initialized) {
71315 avtab_cache_init();
71316 if (policydb_read(&policydb, fp)) {
71317 diff -urNp linux-2.6.32.42/security/smack/smack_lsm.c linux-2.6.32.42/security/smack/smack_lsm.c
71318 --- linux-2.6.32.42/security/smack/smack_lsm.c 2011-03-27 14:31:47.000000000 -0400
71319 +++ linux-2.6.32.42/security/smack/smack_lsm.c 2011-04-17 15:56:46.000000000 -0400
71320 @@ -3073,7 +3073,7 @@ static int smack_inode_getsecctx(struct
71321 return 0;
71322 }
71323
71324 -struct security_operations smack_ops = {
71325 +struct security_operations smack_ops __read_only = {
71326 .name = "smack",
71327
71328 .ptrace_access_check = smack_ptrace_access_check,
71329 diff -urNp linux-2.6.32.42/security/tomoyo/tomoyo.c linux-2.6.32.42/security/tomoyo/tomoyo.c
71330 --- linux-2.6.32.42/security/tomoyo/tomoyo.c 2011-03-27 14:31:47.000000000 -0400
71331 +++ linux-2.6.32.42/security/tomoyo/tomoyo.c 2011-04-17 15:56:46.000000000 -0400
71332 @@ -275,7 +275,7 @@ static int tomoyo_dentry_open(struct fil
71333 * tomoyo_security_ops is a "struct security_operations" which is used for
71334 * registering TOMOYO.
71335 */
71336 -static struct security_operations tomoyo_security_ops = {
71337 +static struct security_operations tomoyo_security_ops __read_only = {
71338 .name = "tomoyo",
71339 .cred_alloc_blank = tomoyo_cred_alloc_blank,
71340 .cred_prepare = tomoyo_cred_prepare,
71341 diff -urNp linux-2.6.32.42/sound/aoa/codecs/onyx.c linux-2.6.32.42/sound/aoa/codecs/onyx.c
71342 --- linux-2.6.32.42/sound/aoa/codecs/onyx.c 2011-03-27 14:31:47.000000000 -0400
71343 +++ linux-2.6.32.42/sound/aoa/codecs/onyx.c 2011-04-17 15:56:46.000000000 -0400
71344 @@ -53,7 +53,7 @@ struct onyx {
71345 spdif_locked:1,
71346 analog_locked:1,
71347 original_mute:2;
71348 - int open_count;
71349 + local_t open_count;
71350 struct codec_info *codec_info;
71351
71352 /* mutex serializes concurrent access to the device
71353 @@ -752,7 +752,7 @@ static int onyx_open(struct codec_info_i
71354 struct onyx *onyx = cii->codec_data;
71355
71356 mutex_lock(&onyx->mutex);
71357 - onyx->open_count++;
71358 + local_inc(&onyx->open_count);
71359 mutex_unlock(&onyx->mutex);
71360
71361 return 0;
71362 @@ -764,8 +764,7 @@ static int onyx_close(struct codec_info_
71363 struct onyx *onyx = cii->codec_data;
71364
71365 mutex_lock(&onyx->mutex);
71366 - onyx->open_count--;
71367 - if (!onyx->open_count)
71368 + if (local_dec_and_test(&onyx->open_count))
71369 onyx->spdif_locked = onyx->analog_locked = 0;
71370 mutex_unlock(&onyx->mutex);
71371
71372 diff -urNp linux-2.6.32.42/sound/aoa/codecs/onyx.h linux-2.6.32.42/sound/aoa/codecs/onyx.h
71373 --- linux-2.6.32.42/sound/aoa/codecs/onyx.h 2011-03-27 14:31:47.000000000 -0400
71374 +++ linux-2.6.32.42/sound/aoa/codecs/onyx.h 2011-04-17 15:56:46.000000000 -0400
71375 @@ -11,6 +11,7 @@
71376 #include <linux/i2c.h>
71377 #include <asm/pmac_low_i2c.h>
71378 #include <asm/prom.h>
71379 +#include <asm/local.h>
71380
71381 /* PCM3052 register definitions */
71382
71383 diff -urNp linux-2.6.32.42/sound/drivers/mts64.c linux-2.6.32.42/sound/drivers/mts64.c
71384 --- linux-2.6.32.42/sound/drivers/mts64.c 2011-03-27 14:31:47.000000000 -0400
71385 +++ linux-2.6.32.42/sound/drivers/mts64.c 2011-04-17 15:56:46.000000000 -0400
71386 @@ -27,6 +27,7 @@
71387 #include <sound/initval.h>
71388 #include <sound/rawmidi.h>
71389 #include <sound/control.h>
71390 +#include <asm/local.h>
71391
71392 #define CARD_NAME "Miditerminal 4140"
71393 #define DRIVER_NAME "MTS64"
71394 @@ -65,7 +66,7 @@ struct mts64 {
71395 struct pardevice *pardev;
71396 int pardev_claimed;
71397
71398 - int open_count;
71399 + local_t open_count;
71400 int current_midi_output_port;
71401 int current_midi_input_port;
71402 u8 mode[MTS64_NUM_INPUT_PORTS];
71403 @@ -695,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct
71404 {
71405 struct mts64 *mts = substream->rmidi->private_data;
71406
71407 - if (mts->open_count == 0) {
71408 + if (local_read(&mts->open_count) == 0) {
71409 /* We don't need a spinlock here, because this is just called
71410 if the device has not been opened before.
71411 So there aren't any IRQs from the device */
71412 @@ -703,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct
71413
71414 msleep(50);
71415 }
71416 - ++(mts->open_count);
71417 + local_inc(&mts->open_count);
71418
71419 return 0;
71420 }
71421 @@ -713,8 +714,7 @@ static int snd_mts64_rawmidi_close(struc
71422 struct mts64 *mts = substream->rmidi->private_data;
71423 unsigned long flags;
71424
71425 - --(mts->open_count);
71426 - if (mts->open_count == 0) {
71427 + if (local_dec_return(&mts->open_count) == 0) {
71428 /* We need the spinlock_irqsave here because we can still
71429 have IRQs at this point */
71430 spin_lock_irqsave(&mts->lock, flags);
71431 @@ -723,8 +723,8 @@ static int snd_mts64_rawmidi_close(struc
71432
71433 msleep(500);
71434
71435 - } else if (mts->open_count < 0)
71436 - mts->open_count = 0;
71437 + } else if (local_read(&mts->open_count) < 0)
71438 + local_set(&mts->open_count, 0);
71439
71440 return 0;
71441 }
71442 diff -urNp linux-2.6.32.42/sound/drivers/portman2x4.c linux-2.6.32.42/sound/drivers/portman2x4.c
71443 --- linux-2.6.32.42/sound/drivers/portman2x4.c 2011-03-27 14:31:47.000000000 -0400
71444 +++ linux-2.6.32.42/sound/drivers/portman2x4.c 2011-04-17 15:56:46.000000000 -0400
71445 @@ -46,6 +46,7 @@
71446 #include <sound/initval.h>
71447 #include <sound/rawmidi.h>
71448 #include <sound/control.h>
71449 +#include <asm/local.h>
71450
71451 #define CARD_NAME "Portman 2x4"
71452 #define DRIVER_NAME "portman"
71453 @@ -83,7 +84,7 @@ struct portman {
71454 struct pardevice *pardev;
71455 int pardev_claimed;
71456
71457 - int open_count;
71458 + local_t open_count;
71459 int mode[PORTMAN_NUM_INPUT_PORTS];
71460 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
71461 };
71462 diff -urNp linux-2.6.32.42/sound/oss/sb_audio.c linux-2.6.32.42/sound/oss/sb_audio.c
71463 --- linux-2.6.32.42/sound/oss/sb_audio.c 2011-03-27 14:31:47.000000000 -0400
71464 +++ linux-2.6.32.42/sound/oss/sb_audio.c 2011-04-17 15:56:46.000000000 -0400
71465 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
71466 buf16 = (signed short *)(localbuf + localoffs);
71467 while (c)
71468 {
71469 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
71470 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
71471 if (copy_from_user(lbuf8,
71472 userbuf+useroffs + p,
71473 locallen))
71474 diff -urNp linux-2.6.32.42/sound/oss/swarm_cs4297a.c linux-2.6.32.42/sound/oss/swarm_cs4297a.c
71475 --- linux-2.6.32.42/sound/oss/swarm_cs4297a.c 2011-03-27 14:31:47.000000000 -0400
71476 +++ linux-2.6.32.42/sound/oss/swarm_cs4297a.c 2011-04-17 15:56:46.000000000 -0400
71477 @@ -2577,7 +2577,6 @@ static int __init cs4297a_init(void)
71478 {
71479 struct cs4297a_state *s;
71480 u32 pwr, id;
71481 - mm_segment_t fs;
71482 int rval;
71483 #ifndef CONFIG_BCM_CS4297A_CSWARM
71484 u64 cfg;
71485 @@ -2667,22 +2666,23 @@ static int __init cs4297a_init(void)
71486 if (!rval) {
71487 char *sb1250_duart_present;
71488
71489 +#if 0
71490 + mm_segment_t fs;
71491 fs = get_fs();
71492 set_fs(KERNEL_DS);
71493 -#if 0
71494 val = SOUND_MASK_LINE;
71495 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
71496 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
71497 val = initvol[i].vol;
71498 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
71499 }
71500 + set_fs(fs);
71501 // cs4297a_write_ac97(s, 0x18, 0x0808);
71502 #else
71503 // cs4297a_write_ac97(s, 0x5e, 0x180);
71504 cs4297a_write_ac97(s, 0x02, 0x0808);
71505 cs4297a_write_ac97(s, 0x18, 0x0808);
71506 #endif
71507 - set_fs(fs);
71508
71509 list_add(&s->list, &cs4297a_devs);
71510
71511 diff -urNp linux-2.6.32.42/sound/pci/ac97/ac97_codec.c linux-2.6.32.42/sound/pci/ac97/ac97_codec.c
71512 --- linux-2.6.32.42/sound/pci/ac97/ac97_codec.c 2011-03-27 14:31:47.000000000 -0400
71513 +++ linux-2.6.32.42/sound/pci/ac97/ac97_codec.c 2011-04-17 15:56:46.000000000 -0400
71514 @@ -1952,7 +1952,7 @@ static int snd_ac97_dev_disconnect(struc
71515 }
71516
71517 /* build_ops to do nothing */
71518 -static struct snd_ac97_build_ops null_build_ops;
71519 +static const struct snd_ac97_build_ops null_build_ops;
71520
71521 #ifdef CONFIG_SND_AC97_POWER_SAVE
71522 static void do_update_power(struct work_struct *work)
71523 diff -urNp linux-2.6.32.42/sound/pci/ac97/ac97_patch.c linux-2.6.32.42/sound/pci/ac97/ac97_patch.c
71524 --- linux-2.6.32.42/sound/pci/ac97/ac97_patch.c 2011-03-27 14:31:47.000000000 -0400
71525 +++ linux-2.6.32.42/sound/pci/ac97/ac97_patch.c 2011-04-23 12:56:12.000000000 -0400
71526 @@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spd
71527 return 0;
71528 }
71529
71530 -static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
71531 +static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = {
71532 .build_spdif = patch_yamaha_ymf743_build_spdif,
71533 .build_3d = patch_yamaha_ymf7x3_3d,
71534 };
71535 @@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdi
71536 return 0;
71537 }
71538
71539 -static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
71540 +static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = {
71541 .build_3d = patch_yamaha_ymf7x3_3d,
71542 .build_post_spdif = patch_yamaha_ymf753_post_spdif
71543 };
71544 @@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific
71545 return 0;
71546 }
71547
71548 -static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
71549 +static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
71550 .build_specific = patch_wolfson_wm9703_specific,
71551 };
71552
71553 @@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific
71554 return 0;
71555 }
71556
71557 -static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
71558 +static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
71559 .build_specific = patch_wolfson_wm9704_specific,
71560 };
71561
71562 @@ -555,7 +555,7 @@ static int patch_wolfson_wm9705_specific
71563 return 0;
71564 }
71565
71566 -static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
71567 +static const struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
71568 .build_specific = patch_wolfson_wm9705_specific,
71569 };
71570
71571 @@ -692,7 +692,7 @@ static int patch_wolfson_wm9711_specific
71572 return 0;
71573 }
71574
71575 -static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
71576 +static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
71577 .build_specific = patch_wolfson_wm9711_specific,
71578 };
71579
71580 @@ -886,7 +886,7 @@ static void patch_wolfson_wm9713_resume
71581 }
71582 #endif
71583
71584 -static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
71585 +static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
71586 .build_specific = patch_wolfson_wm9713_specific,
71587 .build_3d = patch_wolfson_wm9713_3d,
71588 #ifdef CONFIG_PM
71589 @@ -991,7 +991,7 @@ static int patch_sigmatel_stac97xx_speci
71590 return 0;
71591 }
71592
71593 -static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
71594 +static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = {
71595 .build_3d = patch_sigmatel_stac9700_3d,
71596 .build_specific = patch_sigmatel_stac97xx_specific
71597 };
71598 @@ -1038,7 +1038,7 @@ static int patch_sigmatel_stac9708_speci
71599 return patch_sigmatel_stac97xx_specific(ac97);
71600 }
71601
71602 -static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
71603 +static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = {
71604 .build_3d = patch_sigmatel_stac9708_3d,
71605 .build_specific = patch_sigmatel_stac9708_specific
71606 };
71607 @@ -1267,7 +1267,7 @@ static int patch_sigmatel_stac9758_speci
71608 return 0;
71609 }
71610
71611 -static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
71612 +static const struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = {
71613 .build_3d = patch_sigmatel_stac9700_3d,
71614 .build_specific = patch_sigmatel_stac9758_specific
71615 };
71616 @@ -1342,7 +1342,7 @@ static int patch_cirrus_build_spdif(stru
71617 return 0;
71618 }
71619
71620 -static struct snd_ac97_build_ops patch_cirrus_ops = {
71621 +static const struct snd_ac97_build_ops patch_cirrus_ops = {
71622 .build_spdif = patch_cirrus_build_spdif
71623 };
71624
71625 @@ -1399,7 +1399,7 @@ static int patch_conexant_build_spdif(st
71626 return 0;
71627 }
71628
71629 -static struct snd_ac97_build_ops patch_conexant_ops = {
71630 +static const struct snd_ac97_build_ops patch_conexant_ops = {
71631 .build_spdif = patch_conexant_build_spdif
71632 };
71633
71634 @@ -1575,7 +1575,7 @@ static void patch_ad1881_chained(struct
71635 }
71636 }
71637
71638 -static struct snd_ac97_build_ops patch_ad1881_build_ops = {
71639 +static const struct snd_ac97_build_ops patch_ad1881_build_ops = {
71640 #ifdef CONFIG_PM
71641 .resume = ad18xx_resume
71642 #endif
71643 @@ -1662,7 +1662,7 @@ static int patch_ad1885_specific(struct
71644 return 0;
71645 }
71646
71647 -static struct snd_ac97_build_ops patch_ad1885_build_ops = {
71648 +static const struct snd_ac97_build_ops patch_ad1885_build_ops = {
71649 .build_specific = &patch_ad1885_specific,
71650 #ifdef CONFIG_PM
71651 .resume = ad18xx_resume
71652 @@ -1689,7 +1689,7 @@ static int patch_ad1886_specific(struct
71653 return 0;
71654 }
71655
71656 -static struct snd_ac97_build_ops patch_ad1886_build_ops = {
71657 +static const struct snd_ac97_build_ops patch_ad1886_build_ops = {
71658 .build_specific = &patch_ad1886_specific,
71659 #ifdef CONFIG_PM
71660 .resume = ad18xx_resume
71661 @@ -1896,7 +1896,7 @@ static int patch_ad1981a_specific(struct
71662 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
71663 }
71664
71665 -static struct snd_ac97_build_ops patch_ad1981a_build_ops = {
71666 +static const struct snd_ac97_build_ops patch_ad1981a_build_ops = {
71667 .build_post_spdif = patch_ad198x_post_spdif,
71668 .build_specific = patch_ad1981a_specific,
71669 #ifdef CONFIG_PM
71670 @@ -1951,7 +1951,7 @@ static int patch_ad1981b_specific(struct
71671 ARRAY_SIZE(snd_ac97_ad1981x_jack_sense));
71672 }
71673
71674 -static struct snd_ac97_build_ops patch_ad1981b_build_ops = {
71675 +static const struct snd_ac97_build_ops patch_ad1981b_build_ops = {
71676 .build_post_spdif = patch_ad198x_post_spdif,
71677 .build_specific = patch_ad1981b_specific,
71678 #ifdef CONFIG_PM
71679 @@ -2090,7 +2090,7 @@ static int patch_ad1888_specific(struct
71680 return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls));
71681 }
71682
71683 -static struct snd_ac97_build_ops patch_ad1888_build_ops = {
71684 +static const struct snd_ac97_build_ops patch_ad1888_build_ops = {
71685 .build_post_spdif = patch_ad198x_post_spdif,
71686 .build_specific = patch_ad1888_specific,
71687 #ifdef CONFIG_PM
71688 @@ -2139,7 +2139,7 @@ static int patch_ad1980_specific(struct
71689 return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1);
71690 }
71691
71692 -static struct snd_ac97_build_ops patch_ad1980_build_ops = {
71693 +static const struct snd_ac97_build_ops patch_ad1980_build_ops = {
71694 .build_post_spdif = patch_ad198x_post_spdif,
71695 .build_specific = patch_ad1980_specific,
71696 #ifdef CONFIG_PM
71697 @@ -2254,7 +2254,7 @@ static int patch_ad1985_specific(struct
71698 ARRAY_SIZE(snd_ac97_ad1985_controls));
71699 }
71700
71701 -static struct snd_ac97_build_ops patch_ad1985_build_ops = {
71702 +static const struct snd_ac97_build_ops patch_ad1985_build_ops = {
71703 .build_post_spdif = patch_ad198x_post_spdif,
71704 .build_specific = patch_ad1985_specific,
71705 #ifdef CONFIG_PM
71706 @@ -2546,7 +2546,7 @@ static int patch_ad1986_specific(struct
71707 ARRAY_SIZE(snd_ac97_ad1985_controls));
71708 }
71709
71710 -static struct snd_ac97_build_ops patch_ad1986_build_ops = {
71711 +static const struct snd_ac97_build_ops patch_ad1986_build_ops = {
71712 .build_post_spdif = patch_ad198x_post_spdif,
71713 .build_specific = patch_ad1986_specific,
71714 #ifdef CONFIG_PM
71715 @@ -2651,7 +2651,7 @@ static int patch_alc650_specific(struct
71716 return 0;
71717 }
71718
71719 -static struct snd_ac97_build_ops patch_alc650_ops = {
71720 +static const struct snd_ac97_build_ops patch_alc650_ops = {
71721 .build_specific = patch_alc650_specific,
71722 .update_jacks = alc650_update_jacks
71723 };
71724 @@ -2803,7 +2803,7 @@ static int patch_alc655_specific(struct
71725 return 0;
71726 }
71727
71728 -static struct snd_ac97_build_ops patch_alc655_ops = {
71729 +static const struct snd_ac97_build_ops patch_alc655_ops = {
71730 .build_specific = patch_alc655_specific,
71731 .update_jacks = alc655_update_jacks
71732 };
71733 @@ -2915,7 +2915,7 @@ static int patch_alc850_specific(struct
71734 return 0;
71735 }
71736
71737 -static struct snd_ac97_build_ops patch_alc850_ops = {
71738 +static const struct snd_ac97_build_ops patch_alc850_ops = {
71739 .build_specific = patch_alc850_specific,
71740 .update_jacks = alc850_update_jacks
71741 };
71742 @@ -2977,7 +2977,7 @@ static int patch_cm9738_specific(struct
71743 return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls));
71744 }
71745
71746 -static struct snd_ac97_build_ops patch_cm9738_ops = {
71747 +static const struct snd_ac97_build_ops patch_cm9738_ops = {
71748 .build_specific = patch_cm9738_specific,
71749 .update_jacks = cm9738_update_jacks
71750 };
71751 @@ -3068,7 +3068,7 @@ static int patch_cm9739_post_spdif(struc
71752 return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif));
71753 }
71754
71755 -static struct snd_ac97_build_ops patch_cm9739_ops = {
71756 +static const struct snd_ac97_build_ops patch_cm9739_ops = {
71757 .build_specific = patch_cm9739_specific,
71758 .build_post_spdif = patch_cm9739_post_spdif,
71759 .update_jacks = cm9739_update_jacks
71760 @@ -3242,7 +3242,7 @@ static int patch_cm9761_specific(struct
71761 return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls));
71762 }
71763
71764 -static struct snd_ac97_build_ops patch_cm9761_ops = {
71765 +static const struct snd_ac97_build_ops patch_cm9761_ops = {
71766 .build_specific = patch_cm9761_specific,
71767 .build_post_spdif = patch_cm9761_post_spdif,
71768 .update_jacks = cm9761_update_jacks
71769 @@ -3338,7 +3338,7 @@ static int patch_cm9780_specific(struct
71770 return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls));
71771 }
71772
71773 -static struct snd_ac97_build_ops patch_cm9780_ops = {
71774 +static const struct snd_ac97_build_ops patch_cm9780_ops = {
71775 .build_specific = patch_cm9780_specific,
71776 .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */
71777 };
71778 @@ -3458,7 +3458,7 @@ static int patch_vt1616_specific(struct
71779 return 0;
71780 }
71781
71782 -static struct snd_ac97_build_ops patch_vt1616_ops = {
71783 +static const struct snd_ac97_build_ops patch_vt1616_ops = {
71784 .build_specific = patch_vt1616_specific
71785 };
71786
71787 @@ -3812,7 +3812,7 @@ static int patch_it2646_specific(struct
71788 return 0;
71789 }
71790
71791 -static struct snd_ac97_build_ops patch_it2646_ops = {
71792 +static const struct snd_ac97_build_ops patch_it2646_ops = {
71793 .build_specific = patch_it2646_specific,
71794 .update_jacks = it2646_update_jacks
71795 };
71796 @@ -3846,7 +3846,7 @@ static int patch_si3036_specific(struct
71797 return 0;
71798 }
71799
71800 -static struct snd_ac97_build_ops patch_si3036_ops = {
71801 +static const struct snd_ac97_build_ops patch_si3036_ops = {
71802 .build_specific = patch_si3036_specific,
71803 };
71804
71805 @@ -3913,7 +3913,7 @@ static int patch_ucb1400_specific(struct
71806 return 0;
71807 }
71808
71809 -static struct snd_ac97_build_ops patch_ucb1400_ops = {
71810 +static const struct snd_ac97_build_ops patch_ucb1400_ops = {
71811 .build_specific = patch_ucb1400_specific,
71812 };
71813
71814 diff -urNp linux-2.6.32.42/sound/pci/hda/patch_intelhdmi.c linux-2.6.32.42/sound/pci/hda/patch_intelhdmi.c
71815 --- linux-2.6.32.42/sound/pci/hda/patch_intelhdmi.c 2011-03-27 14:31:47.000000000 -0400
71816 +++ linux-2.6.32.42/sound/pci/hda/patch_intelhdmi.c 2011-04-17 15:56:46.000000000 -0400
71817 @@ -511,10 +511,10 @@ static void hdmi_non_intrinsic_event(str
71818 cp_ready);
71819
71820 /* TODO */
71821 - if (cp_state)
71822 - ;
71823 - if (cp_ready)
71824 - ;
71825 + if (cp_state) {
71826 + }
71827 + if (cp_ready) {
71828 + }
71829 }
71830
71831
71832 diff -urNp linux-2.6.32.42/sound/pci/intel8x0m.c linux-2.6.32.42/sound/pci/intel8x0m.c
71833 --- linux-2.6.32.42/sound/pci/intel8x0m.c 2011-03-27 14:31:47.000000000 -0400
71834 +++ linux-2.6.32.42/sound/pci/intel8x0m.c 2011-04-23 12:56:12.000000000 -0400
71835 @@ -1264,7 +1264,7 @@ static struct shortname_table {
71836 { 0x5455, "ALi M5455" },
71837 { 0x746d, "AMD AMD8111" },
71838 #endif
71839 - { 0 },
71840 + { 0, },
71841 };
71842
71843 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
71844 diff -urNp linux-2.6.32.42/sound/pci/ymfpci/ymfpci_main.c linux-2.6.32.42/sound/pci/ymfpci/ymfpci_main.c
71845 --- linux-2.6.32.42/sound/pci/ymfpci/ymfpci_main.c 2011-03-27 14:31:47.000000000 -0400
71846 +++ linux-2.6.32.42/sound/pci/ymfpci/ymfpci_main.c 2011-05-04 17:56:28.000000000 -0400
71847 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
71848 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
71849 break;
71850 }
71851 - if (atomic_read(&chip->interrupt_sleep_count)) {
71852 - atomic_set(&chip->interrupt_sleep_count, 0);
71853 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
71854 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
71855 wake_up(&chip->interrupt_sleep);
71856 }
71857 __end:
71858 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
71859 continue;
71860 init_waitqueue_entry(&wait, current);
71861 add_wait_queue(&chip->interrupt_sleep, &wait);
71862 - atomic_inc(&chip->interrupt_sleep_count);
71863 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
71864 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
71865 remove_wait_queue(&chip->interrupt_sleep, &wait);
71866 }
71867 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
71868 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
71869 spin_unlock(&chip->reg_lock);
71870
71871 - if (atomic_read(&chip->interrupt_sleep_count)) {
71872 - atomic_set(&chip->interrupt_sleep_count, 0);
71873 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
71874 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
71875 wake_up(&chip->interrupt_sleep);
71876 }
71877 }
71878 @@ -2369,7 +2369,7 @@ int __devinit snd_ymfpci_create(struct s
71879 spin_lock_init(&chip->reg_lock);
71880 spin_lock_init(&chip->voice_lock);
71881 init_waitqueue_head(&chip->interrupt_sleep);
71882 - atomic_set(&chip->interrupt_sleep_count, 0);
71883 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
71884 chip->card = card;
71885 chip->pci = pci;
71886 chip->irq = -1;
71887 diff -urNp linux-2.6.32.42/tools/gcc/Makefile linux-2.6.32.42/tools/gcc/Makefile
71888 --- linux-2.6.32.42/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
71889 +++ linux-2.6.32.42/tools/gcc/Makefile 2011-06-04 20:52:13.000000000 -0400
71890 @@ -0,0 +1,11 @@
71891 +#CC := gcc
71892 +#PLUGIN_SOURCE_FILES := pax_plugin.c
71893 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
71894 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
71895 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
71896 +
71897 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
71898 +
71899 +hostlibs-y := pax_plugin.so
71900 +always := $(hostlibs-y)
71901 +pax_plugin-objs := pax_plugin.o
71902 diff -urNp linux-2.6.32.42/tools/gcc/pax_plugin.c linux-2.6.32.42/tools/gcc/pax_plugin.c
71903 --- linux-2.6.32.42/tools/gcc/pax_plugin.c 1969-12-31 19:00:00.000000000 -0500
71904 +++ linux-2.6.32.42/tools/gcc/pax_plugin.c 2011-06-04 20:52:13.000000000 -0400
71905 @@ -0,0 +1,242 @@
71906 +/*
71907 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
71908 + * Licensed under the GPL v2
71909 + *
71910 + * Note: the choice of the license means that the compilation process is
71911 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
71912 + * but for the kernel it doesn't matter since it doesn't link against
71913 + * any of the gcc libraries
71914 + *
71915 + * gcc plugin to help implement various PaX features
71916 + *
71917 + * - track lowest stack pointer
71918 + *
71919 + * TODO:
71920 + * - initialize all local variables
71921 + *
71922 + * BUGS:
71923 + */
71924 +#include "gcc-plugin.h"
71925 +#include "plugin-version.h"
71926 +#include "config.h"
71927 +#include "system.h"
71928 +#include "coretypes.h"
71929 +#include "tm.h"
71930 +#include "toplev.h"
71931 +#include "basic-block.h"
71932 +#include "gimple.h"
71933 +//#include "expr.h" where are you...
71934 +#include "diagnostic.h"
71935 +#include "rtl.h"
71936 +#include "emit-rtl.h"
71937 +#include "function.h"
71938 +#include "tree.h"
71939 +#include "tree-pass.h"
71940 +#include "intl.h"
71941 +
71942 +int plugin_is_GPL_compatible;
71943 +
71944 +static int track_frame_size = -1;
71945 +static const char track_function[] = "pax_track_stack";
71946 +static bool init_locals;
71947 +
71948 +static struct plugin_info pax_plugin_info = {
71949 + .version = "201106030000",
71950 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
71951 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
71952 +};
71953 +
71954 +static bool gate_pax_track_stack(void);
71955 +static unsigned int execute_pax_tree_instrument(void);
71956 +static unsigned int execute_pax_final(void);
71957 +
71958 +static struct gimple_opt_pass pax_tree_instrument_pass = {
71959 + .pass = {
71960 + .type = GIMPLE_PASS,
71961 + .name = "pax_tree_instrument",
71962 + .gate = gate_pax_track_stack,
71963 + .execute = execute_pax_tree_instrument,
71964 + .sub = NULL,
71965 + .next = NULL,
71966 + .static_pass_number = 0,
71967 + .tv_id = TV_NONE,
71968 + .properties_required = PROP_gimple_leh | PROP_cfg,
71969 + .properties_provided = 0,
71970 + .properties_destroyed = 0,
71971 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
71972 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
71973 + }
71974 +};
71975 +
71976 +static struct rtl_opt_pass pax_final_rtl_opt_pass = {
71977 + .pass = {
71978 + .type = RTL_PASS,
71979 + .name = "pax_final",
71980 + .gate = gate_pax_track_stack,
71981 + .execute = execute_pax_final,
71982 + .sub = NULL,
71983 + .next = NULL,
71984 + .static_pass_number = 0,
71985 + .tv_id = TV_NONE,
71986 + .properties_required = 0,
71987 + .properties_provided = 0,
71988 + .properties_destroyed = 0,
71989 + .todo_flags_start = 0,
71990 + .todo_flags_finish = 0
71991 + }
71992 +};
71993 +
71994 +static bool gate_pax_track_stack(void)
71995 +{
71996 + return track_frame_size >= 0;
71997 +}
71998 +
71999 +static void pax_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
72000 +{
72001 + gimple call;
72002 + tree decl, type;
72003 +
72004 + // insert call to void pax_track_stack(void)
72005 + type = build_function_type_list(void_type_node, NULL_TREE);
72006 + decl = build_fn_decl(track_function, type);
72007 + DECL_ASSEMBLER_NAME(decl); // for LTO
72008 + call = gimple_build_call(decl, 0);
72009 + if (before)
72010 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
72011 + else
72012 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
72013 +}
72014 +
72015 +static unsigned int execute_pax_tree_instrument(void)
72016 +{
72017 + basic_block bb;
72018 + gimple_stmt_iterator gsi;
72019 +
72020 + // 1. loop through BBs and GIMPLE statements
72021 + FOR_EACH_BB(bb) {
72022 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
72023 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
72024 + tree decl;
72025 + gimple stmt = gsi_stmt(gsi);
72026 +
72027 + if (!is_gimple_call(stmt))
72028 + continue;
72029 + decl = gimple_call_fndecl(stmt);
72030 + if (!decl)
72031 + continue;
72032 + if (TREE_CODE(decl) != FUNCTION_DECL)
72033 + continue;
72034 + if (!DECL_BUILT_IN(decl))
72035 + continue;
72036 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
72037 + continue;
72038 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
72039 + continue;
72040 +
72041 + // 2. insert track call after each __builtin_alloca call
72042 + pax_add_instrumentation(&gsi, false);
72043 +// print_node(stderr, "pax", decl, 4);
72044 + }
72045 + }
72046 +
72047 + // 3. insert track call at the beginning
72048 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
72049 + gsi = gsi_start_bb(bb);
72050 + pax_add_instrumentation(&gsi, true);
72051 +
72052 + return 0;
72053 +}
72054 +
72055 +static unsigned int execute_pax_final(void)
72056 +{
72057 + rtx insn;
72058 +
72059 + if (cfun->calls_alloca)
72060 + return 0;
72061 +
72062 + // 1. find pax_track_stack calls
72063 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
72064 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
72065 + rtx body;
72066 +
72067 + if (!CALL_P(insn))
72068 + continue;
72069 + body = PATTERN(insn);
72070 + if (GET_CODE(body) != CALL)
72071 + continue;
72072 + body = XEXP(body, 0);
72073 + if (GET_CODE(body) != MEM)
72074 + continue;
72075 + body = XEXP(body, 0);
72076 + if (GET_CODE(body) != SYMBOL_REF)
72077 + continue;
72078 + if (strcmp(XSTR(body, 0), track_function))
72079 + continue;
72080 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
72081 + // 2. delete call if function frame is not big enough
72082 + if (get_frame_size() >= track_frame_size)
72083 + continue;
72084 + delete_insn_and_edges(insn);
72085 + }
72086 +
72087 +// print_simple_rtl(stderr, get_insns());
72088 +// print_rtl(stderr, get_insns());
72089 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
72090 +
72091 + return 0;
72092 +}
72093 +
72094 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
72095 +{
72096 + const char * const plugin_name = plugin_info->base_name;
72097 + const int argc = plugin_info->argc;
72098 + const struct plugin_argument * const argv = plugin_info->argv;
72099 + int i;
72100 + struct register_pass_info pax_tree_instrument_pass_info = {
72101 + .pass = &pax_tree_instrument_pass.pass,
72102 +// .reference_pass_name = "tree_profile",
72103 + .reference_pass_name = "optimized",
72104 + .ref_pass_instance_number = 0,
72105 + .pos_op = PASS_POS_INSERT_AFTER
72106 + };
72107 + struct register_pass_info pax_final_pass_info = {
72108 + .pass = &pax_final_rtl_opt_pass.pass,
72109 + .reference_pass_name = "final",
72110 + .ref_pass_instance_number = 0,
72111 + .pos_op = PASS_POS_INSERT_BEFORE
72112 + };
72113 +
72114 + if (!plugin_default_version_check(version, &gcc_version)) {
72115 + error(G_("incompatible gcc/plugin versions"));
72116 + return 1;
72117 + }
72118 +
72119 + register_callback(plugin_name, PLUGIN_INFO, NULL, &pax_plugin_info);
72120 +
72121 + for (i = 0; i < argc; ++i) {
72122 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
72123 + if (!argv[i].value) {
72124 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
72125 + continue;
72126 + }
72127 + track_frame_size = atoi(argv[i].value);
72128 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
72129 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
72130 + continue;
72131 + }
72132 + if (!strcmp(argv[i].key, "initialize-locals")) {
72133 + if (argv[i].value) {
72134 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
72135 + continue;
72136 + }
72137 + init_locals = true;
72138 + continue;
72139 + }
72140 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
72141 + }
72142 +
72143 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_tree_instrument_pass_info);
72144 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &pax_final_pass_info);
72145 +
72146 + return 0;
72147 +}
72148 Binary files linux-2.6.32.42/tools/gcc/pax_plugin.so and linux-2.6.32.42/tools/gcc/pax_plugin.so differ
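For context on the plugin added above: the GIMPLE pass (pax_tree_instrument) inserts a call to pax_track_stack() at the start of every function and after every __builtin_alloca() call, while the RTL pass (pax_final, registered just before "final") deletes those calls again in functions that neither use alloca nor have a frame of at least track-lowest-sp bytes, so small frames pay no runtime cost. Below is a minimal sketch of what an instrumented function is conceptually equivalent to; the real pax_track_stack() lives in the kernel and records the lowest stack pointer seen, and the stub and example function here are hypothetical illustrations, not part of the patch:

/* sketch only - build with: gcc -O2 sketch.c */
static void pax_track_stack(void) { }   /* no-op stand-in for the kernel helper */

int frobnicate(int n)                   /* hypothetical function with a big frame */
{
        char big[512];                  /* at or above the track-lowest-sp threshold */
        char *p;

        pax_track_stack();              /* call inserted at function entry */

        p = __builtin_alloca(n);        /* dynamic stack allocation ... */
        pax_track_stack();              /* ... gets a tracking call right after it */

        big[0] = p[0] = (char)n;
        return big[0];
}

int main(void)
{
        return frobnicate(16);
}

When built by the Makefile above, the plugin would be loaded with something like -fplugin=pax_plugin.so -fplugin-arg-pax_plugin-track-lowest-sp=100 (the gcc option syntax matches the error strings in plugin_init(); the threshold value is only an illustrative example, the real one is chosen by the grsecurity build configuration).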
72149 diff -urNp linux-2.6.32.42/usr/gen_init_cpio.c linux-2.6.32.42/usr/gen_init_cpio.c
72150 --- linux-2.6.32.42/usr/gen_init_cpio.c 2011-03-27 14:31:47.000000000 -0400
72151 +++ linux-2.6.32.42/usr/gen_init_cpio.c 2011-04-17 15:56:46.000000000 -0400
72152 @@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name,
72153 int retval;
72154 int rc = -1;
72155 int namesize;
72156 - int i;
72157 + unsigned int i;
72158
72159 mode |= S_IFREG;
72160
72161 @@ -383,9 +383,10 @@ static char *cpio_replace_env(char *new_
72162 *env_var = *expanded = '\0';
72163 strncat(env_var, start + 2, end - start - 2);
72164 strncat(expanded, new_location, start - new_location);
72165 - strncat(expanded, getenv(env_var), PATH_MAX);
72166 - strncat(expanded, end + 1, PATH_MAX);
72167 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
72168 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
72169 strncpy(new_location, expanded, PATH_MAX);
72170 + new_location[PATH_MAX] = 0;
72171 } else
72172 break;
72173 }
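The gen_init_cpio.c hunk above fixes a common strncat() misuse: the third argument bounds how many bytes may be appended, not the total size of the destination, so passing PATH_MAX unconditionally can overflow once the buffer already holds data. The remaining space has to be recomputed before each call, as the patch now does. A minimal userspace sketch of the corrected pattern; the buffer and strings are hypothetical, not from gen_init_cpio.c:

/* sketch only - build with: gcc -Wall sketch.c */
#include <stdio.h>
#include <string.h>

#define BUF_MAX 64                              /* stands in for PATH_MAX */

int main(void)
{
        char buf[BUF_MAX + 1] = "prefix/";      /* +1 keeps room for the NUL */

        /* append at most the space left after the current contents */
        strncat(buf, "expanded-env-value", BUF_MAX - strlen(buf));
        strncat(buf, "/rest-of-path",      BUF_MAX - strlen(buf));

        printf("%s\n", buf);
        return 0;
}

Note that strncat() always NUL-terminates, so the sketch needs no manual terminator; the hunk's added new_location[PATH_MAX] = 0 instead guards the strncpy() call, which does not terminate when the source fills the buffer.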
72174 diff -urNp linux-2.6.32.42/virt/kvm/kvm_main.c linux-2.6.32.42/virt/kvm/kvm_main.c
72175 --- linux-2.6.32.42/virt/kvm/kvm_main.c 2011-03-27 14:31:47.000000000 -0400
72176 +++ linux-2.6.32.42/virt/kvm/kvm_main.c 2011-04-23 21:41:37.000000000 -0400
72177 @@ -1748,6 +1748,7 @@ static int kvm_vcpu_release(struct inode
72178 return 0;
72179 }
72180
72181 +/* cannot be const */
72182 static struct file_operations kvm_vcpu_fops = {
72183 .release = kvm_vcpu_release,
72184 .unlocked_ioctl = kvm_vcpu_ioctl,
72185 @@ -2344,6 +2345,7 @@ static int kvm_vm_mmap(struct file *file
72186 return 0;
72187 }
72188
72189 +/* cannot be const */
72190 static struct file_operations kvm_vm_fops = {
72191 .release = kvm_vm_release,
72192 .unlocked_ioctl = kvm_vm_ioctl,
72193 @@ -2431,6 +2433,7 @@ out:
72194 return r;
72195 }
72196
72197 +/* cannot be const */
72198 static struct file_operations kvm_chardev_ops = {
72199 .unlocked_ioctl = kvm_dev_ioctl,
72200 .compat_ioctl = kvm_dev_ioctl,
72201 @@ -2494,7 +2497,7 @@ asmlinkage void kvm_handle_fault_on_rebo
72202 if (kvm_rebooting)
72203 /* spin while reset goes on */
72204 while (true)
72205 - ;
72206 + cpu_relax();
72207 /* Fault while not rebooting. We want the trace. */
72208 BUG();
72209 }
72210 @@ -2714,7 +2717,7 @@ static void kvm_sched_out(struct preempt
72211 kvm_arch_vcpu_put(vcpu);
72212 }
72213
72214 -int kvm_init(void *opaque, unsigned int vcpu_size,
72215 +int kvm_init(const void *opaque, unsigned int vcpu_size,
72216 struct module *module)
72217 {
72218 int r;
72219 @@ -2767,7 +2770,7 @@ int kvm_init(void *opaque, unsigned int
72220 /* A kmem cache lets us meet the alignment requirements of fx_save. */
72221 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
72222 __alignof__(struct kvm_vcpu),
72223 - 0, NULL);
72224 + SLAB_USERCOPY, NULL);
72225 if (!kvm_vcpu_cache) {
72226 r = -ENOMEM;
72227 goto out_free_5;